# -*- coding: utf-8 -*-
# Python 2 excerpts from a Kodi (XBMC) add-on. The snippets below assume the
# surrounding add-on module provides Auth, SITE_URL, COLLECTIONS_URL, cover,
# cache_minutes and the xbmcup helper package.
import os
import re
import json
import base64
import traceback
from itertools import izip_longest

import xbmcgui


class HttpData:
    def load(self, url):
        try:
            self.auth = Auth()
            self.cookie = self.auth.get_cookies()
            headers = {"Referer": url}
            response = xbmcup.net.http.get(url, cookies=self.cookie, headers=headers)
        except xbmcup.net.http.exceptions.RequestException:
            print traceback.format_exc()
            return None
        else:
            if response.status_code == 200:
                if not self.auth.check_auth(response.text):
                    self.auth.autorize()
                return response.text
            return None

    def post(self, url, data=None):
        # default to an empty payload; the old `try: data` probe was a no-op
        if data is None:
            data = {}
        try:
            self.auth = Auth()
            self.cookie = self.auth.get_cookies()
            headers = {"Referer": url}
            response = xbmcup.net.http.post(url, data, cookies=self.cookie, headers=headers)
        except xbmcup.net.http.exceptions.RequestException:
            print traceback.format_exc()
            return None
        else:
            if response.status_code == 200:
                if not self.auth.check_auth(response.text):
                    self.auth.autorize()
                return response.text
            return None

    def ajax(self, url):
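        # GET with the XMLHttpRequest header so the site serves its AJAX payload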
        try:
            self.auth = Auth()
            self.cookie = self.auth.get_cookies()
            headers = {"X-Requested-With": "XMLHttpRequest", "Referer": SITE_URL}
            response = xbmcup.net.http.get(url, cookies=self.cookie, headers=headers)
            print url
        except xbmcup.net.http.exceptions.RequestException:
            print traceback.format_exc()
            return None
        else:
            return response.text if response.status_code == 200 else None

    def get_movies(self, url, page, classname="main_content_item", nocache=False, search="", itemclassname="item"):
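        # returns (cache_minutes_or_None, {'page': {...}, 'data': [...]});
        # `page` is 0-based here while the site paginates from 1, hence page + 1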
        page = int(page)
        if page > 0:
            url = SITE_URL + "/" + url.strip("/") + "/page/" + str(page + 1)
        else:
            url = SITE_URL + "/" + url.strip("/")
        print url

        if search != "" and page == 0:
            html = self.post(url, {"usersearch": search, "filter": "all"})
        else:
            html = self.load(url)

        # print html.encode('utf-8')

        if not html:
            return None, {"page": {"pagenum": 0, "maxpage": 0}, "data": []}
        result = {"page": {}, "data": []}
        soup = xbmcup.parser.html(self.strip_scripts(html))
        result["page"] = self.get_page(soup)
        center_menu = soup.find("div", class_=classname)
        # print center_menu
        try:
            for div in center_menu.find_all("div", class_=itemclassname):
                if search != "":
                    href = None
                else:
                    href = div.find("h2").find("a")
                try:
                    quality = div.find("span", class_="quality_film_title").get_text().strip()
                except:
                    quality = ""

                dop_information = []
                try:
                    if itemclassname == "item_wrap":
                        year = div.find("a", class_="fast_search").get_text().strip()
                    else:
                        year = div.find("div", class_="smoll_year").get_text().strip()
                    dop_information.append(year)
                except:
                    pass

                try:
                    if itemclassname == "item_wrap":
                        genre = div.find("span", class_="section_item_list").get_text().strip()
                    else:
                        genre = div.find("div", class_="smoll_janr").get_text().strip()
                    dop_information.append(genre)
                except:
                    pass

                information = ""
                if len(dop_information) > 0:
                    information = "[COLOR white][" + ", ".join(dop_information) + "][/COLOR]"

                posters = div.find("div", class_="preview").find_all("img")
                movieposter = None
                for img in posters:
                    img_src = img.get("src")
                    if img_src.find("http") != -1:
                        movieposter = img_src
                        if search != "":
                            href = img.parent
                        break

                if href is None:
                    # a bare `raise` with no active exception fails in Python 2;
                    # raise explicitly so the outer handler logs the traceback
                    raise ValueError("movie link not found")

                if search != "":
                    name = href.find("img").get("alt").strip()
                else:
                    name = href.get_text().strip()

                movie_url = (href.get("href"),)
                movie_id = re.compile("/film/([\d]+)-", re.S).findall(movie_url[0])[0]

                result["data"].append(
                    {
                        "url": movie_url,
                        "id": movie_id,
                        "quality": self.format_quality(quality),
                        "year": information,
                        "name": name,
                        "img": None if not movieposter else movieposter,
                    }
                )

            # print result['data']
        except:
            print traceback.format_exc()

        if nocache:
            return None, result
        else:
            return cache_minutes, result

    def get_movie_info(self, url):
        url = SITE_URL + url[0]
        html = self.load(url)
        print url.encode("utf-8")
        movieInfo = {}
        movieInfo["no_files"] = None
        movieInfo["episodes"] = True
        movieInfo["movies"] = []
        movieInfo["resolutions"] = []

        if not html:
            movieInfo["no_files"] = "HTTP error"
            return movieInfo

        html = html.encode("utf-8")
        soup = xbmcup.parser.html(self.strip_scripts(html))

        js_string = (
            re.compile("'source' : \$\.parseJSON\('([^']+)'\)", re.S)
            .findall(html)[0]
            .decode("string_escape")
            .decode("utf-8")
        )
        movies = json.loads(js_string, "utf-8")
        # print movies
        if movies:
            for window_id in movies:
                current_movie = {"folder_title": "", "movies": {}}
                try:
                    current_movie["folder_title"] = (
                        soup.find("div", {"data-folder": str(window_id)}).find("a").get("title").encode("utf-8")
                    )
                except:
                    current_movie["folder_title"] = xbmcup.app.lang[30113]

                sort_movies = sorted(movies[window_id].items(), key=lambda (k, v): int(k))
                for movie in sort_movies:
                    try:
                        current_movie["movies"][movie[0]].append(movie[1])
                    except:
                        current_movie["movies"][movie[0]] = []
                        current_movie["movies"][movie[0]].append(movie[1])

                # keep only the first link recorded for each quality key
                for quality_key in current_movie["movies"]:
                    current_movie["movies"][quality_key] = current_movie["movies"][quality_key][0]
                    # if len(current_movie['movies'][quality_key]) > 1:
                    #     movieInfo['episodes'] = True

                movieInfo["movies"].append(current_movie)

            movieInfo["title"] = soup.find("h1", id="film_object_name").get_text()
            try:
                movieInfo["description"] = soup.find("div", class_="description").get_text().strip()
            except:
                movieInfo["description"] = ""

            try:
                movieInfo["fanart"] = SITE_URL + soup.find("div", class_="screen_bg").find("a").get("href")
            except:
                movieInfo["fanart"] = ""
            try:
                movieInfo["cover"] = soup.find("img", id="preview_img").get("src")
            except:
                movieInfo["cover"] = ""
            try:
                movieInfo["genres"] = []
                genres = soup.find("div", class_="list_janr").findAll("a")
                for genre in genres:
                    movieInfo["genres"].append(genre.get_text().strip())
                movieInfo["genres"] = " / ".join(movieInfo["genres"]).encode("utf-8")
            except:
                movieInfo["genres"] = ""

            try:
                results = soup.findAll("a", class_="fast_search")
                movieInfo["year"] = self.get_year(results)
            except:
                movieInfo["year"] = ""
            try:
                movieInfo["director"] = soup.find("span", class_="regiser_item").get_text().encode("utf-8")
            except:
                movieInfo["director"] = ""
        else:
            try:
                no_files = soup.find("div", class_="no_files").get_text().strip().encode("utf-8")
            except:
                no_files = ""

            movieInfo["no_files"] = no_files

        return movieInfo

    def get_collections(self):
        url = SITE_URL + "/collection"
        html = self.load(url)
        if not html:
            return None, {"page": {"pagenum": 0, "maxpage": 10}, "data": []}
        html = html.encode("utf-8")
        result = {"page": {}, "data": []}
        soup = xbmcup.parser.html(self.strip_scripts(html))
        wrap = soup.find("div", class_="main_content_item")
        try:
            for div in wrap.find_all("div", class_="item"):
                try:
                    preview_img = div.find("div", class_="preview").find("img").get("src")
                except:
                    preview_img = ""

                try:
                    movie_count = div.find("div", class_="item_content").find("span").get_text().strip()
                except:
                    movie_count = ""

                try:
                    href = div.find("div", class_="item_content").find("a")
                    name = href.get_text().strip() + (" (%s)" % movie_count if movie_count != "" else "")
                    href = href.get("href")
                except:
                    name = ""
                    href = ""

                result["data"].append(
                    {"url": href, "name": name, "img": None if not preview_img else (SITE_URL + preview_img)}
                )

        except:
            print traceback.format_exc()

        return cache_minutes, result

    def get_bookmarks(self):
        url = "%s/users/profile/bookmark" % SITE_URL

        # self.ajax('%s/users/profile/addbookmark?name=%s' % (SITE_URL, BOOKMARK_DIR))

        html = self.load(url)
        if not html:
            return None, {"page": {"pagenum": 0, "maxpage": 0}, "data": []}
        html = html.encode("utf-8")
        result = {"page": {}, "data": []}
        soup = xbmcup.parser.html(self.strip_scripts(html))
        wrap = soup.find("div", id="bookmark_list")

        try:
            for div in wrap.find_all("a"):
                try:
                    href = div.get("data-rel")
                    name = div.get_text().strip()
                except:
                    name = ""
                    href = ""

                result["data"].append({"url": href, "name": name, "img": cover.treetv})

        except:
            print traceback.format_exc()

        return None, result

    def get_year(self, results):
        for res in results:
            if res.get("data-rel") == "year1":
                return res.get_text().encode("utf-8")
        return 0

    def strip_scripts(self, html):
        # strip all <script></script> tags and their contents so the HTML
        # parser does not trip over markup embedded in inline JS
        return re.compile(r"<script[^>]*>(.*?)</script>", re.S).sub("", html)

    def format_quality(self, quality):
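        # wrap a known quality tag in its Kodi [COLOR] label; anything
        # unrecognised is shown in red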
        qualitys = {"HD": "ff3BADEE", "HQ": "ff59C641", "SQ": "ffFFB119", "LQ": "ffDE4B64"}
        if quality in qualitys:
            return "[COLOR %s][%s][/COLOR]" % (qualitys[quality], quality)
        return "[COLOR ffDE4B64][%s][/COLOR]" % quality if quality != "" else ""

    def get_page(self, soup):
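        # parse the current and last page numbers out of the paginator block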
        info = {"pagenum": 0, "maxpage": 0}
        try:
            wrap = soup.find("div", id="main_paginator")
            if wrap is None or wrap.find("b") is None:
                # fall back to the alternate paginator markup; the old
                # try/except probe never triggered because find() returns
                # None instead of raising
                wrap = soup.find("div", class_="paginationControl")

            info["pagenum"] = int(wrap.find("b").get_text().encode("utf-8"))
            try:
                info["maxpage"] = int(wrap.find("a", class_="last").get("data-rel"))
            except:
                try:
                    try:
                        info["maxpage"] = int(os.path.basename(wrap.find("a", class_="next").get("href")))
                    except:
                        info["maxpage"] = wrap.find("a", class_="next").get("data-rel")
                except:
                    info["maxpage"] = info["pagenum"]
        except:
            info["pagenum"] = 1
            info["maxpage"] = 1
            print traceback.format_exc()

        return info

# === Example #2 ===

class HttpData:
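    # mycookie caches session cookies from the first response whenever Auth
    # has no cookies of its own (see load/post/ajax below)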

    mycookie = None

    def load(self, url):
        try:
            self.auth = Auth()
            self.cookie = self.auth.get_cookies()
            cook = self.mycookie if self.cookie is None else self.cookie
            response = xbmcup.net.http.get(url, cookies=cook, verify=False)
            if self.cookie is None:
                self.mycookie = response.cookies
        except xbmcup.net.http.exceptions.RequestException:
            print traceback.format_exc()
            return None
        else:
            if response.status_code == 200:
                if not self.auth.check_auth(response.text):
                    self.auth.autorize()
                return response.text
            return None

    def post(self, url, data=None):
        # default to an empty payload; the old `try: data` probe was a no-op
        if data is None:
            data = {}
        try:
            self.auth = Auth()
            self.cookie = self.auth.get_cookies()
            cook = self.mycookie if self.cookie is None else self.cookie
            response = xbmcup.net.http.post(url, data, cookies=cook, verify=False)

            if self.cookie is None:
                self.mycookie = response.cookies

        except xbmcup.net.http.exceptions.RequestException:
            print traceback.format_exc()
            return None
        else:
            if response.status_code == 200:
                if not self.auth.check_auth(response.text):
                    self.auth.autorize()
                return response.text
            return None


    def ajax(self, url, data=None, referer=False):
        # data=None instead of data={} avoids the shared-mutable-default trap
        if data is None:
            data = {}
        try:
            self.auth = Auth()
            self.cookie = self.auth.get_cookies()
            headers = {
                'X-Requested-With' : 'XMLHttpRequest'
            }
            if referer:
                headers['Referer'] = referer

            cook = self.mycookie if self.cookie is None else self.cookie
            if len(data) > 0:
                response = xbmcup.net.http.post(url, data, cookies=cook, headers=headers, verify=False)
            else:
                response = xbmcup.net.http.get(url, cookies=cook, headers=headers, verify=False)

            if self.cookie is None:
                self.mycookie = response.cookies

        except xbmcup.net.http.exceptions.RequestException:
            print traceback.format_exc()
            return None
        else:
            return response.text if response.status_code == 200 else None


    def get_my_news(self, url, page, idname='dle-content', nocache=False, search="", itemclassname="shortstory"):
        page = int(page)

        url = SITE_URL+"/api/notifications/get"

        if(page > 0 and search == ''):
            page += 1
        else:
            page = 1

        post_data={'page' : page}

        html = self.ajax(url, post_data, SITE_URL + '/')

        if not html:
            return None, {'page': {'pagenum' : 0, 'maxpage' : 0}, 'data': []}
        result = {'page': {'pagenum' : page, 'maxpage' : 10000}, 'data': []}

        try:
            json_result = json.loads(html)
            result['page']['maxpage'] = len(json_result['message']['items'])

            for item_news in json_result['message']['items']:
                movie_name = item_news['data']['movie_name']
                movie_url = item_news['data']['movie_link']
                movie_id = item_news['id']
                quality_s = item_news['date_string']
                dop_info = 'S'+str(item_news['data']['season']) + 'E'+ str(item_news['data']['episode'])
                not_movie = False

                result['data'].append({
                        'url': movie_url,
                        'id': movie_id,
                        'not_movie': not_movie,
                        'quality': '[COLOR ff3BADEE]'+quality_s+'[/COLOR]',
                        'year': '[COLOR ffFFB119]'+dop_info+'[/COLOR]',
                        'name': movie_name.strip(),
                        'img': None
                    })
        except:
            print traceback.format_exc()

        if(nocache):
            return None, result
        else:
            return cache_minutes, result


    def get_movies(self, url, page, idname='dle-content', nocache=False, search="", itemclassname="shortstory"):
        page = int(page)

        if(page > 0 and search == ''):
            url = SITE_URL+"/"+url.strip('/')+"/page/"+str(page+1)
        else:
            url = SITE_URL+"/"+url.strip('/')

        # print url

        if(search != ''):
            html = self.ajax(url)
        else:
            html = self.load(url)

        if not html:
            return None, {'page': {'pagenum' : 0, 'maxpage' : 0}, 'data': []}
        result = {'page': {}, 'data': []}
        soup = xbmcup.parser.html(self.strip_scripts(html))

        if(search != ''):
            result['page'] = self.get_page_search(soup)
        else:
            result['page'] = self.get_page(soup)

        if(idname != ''):
            center_menu = soup.find('div', id=idname)
        else:
            center_menu = soup
        try:
            for div in center_menu.find_all('article', class_=itemclassname):
                href = div.find('div', class_='short')#.find('a')

                movie_name = div.find('div', class_='full').find('h2', class_='name').find('a').get_text()
                movie_url = href.find('a', class_='watch').get('href')
                movie_id = re.compile('/([\d]+)-', re.S).findall(movie_url)[0]

                not_movie = True
                try:
                    not_movie_test = div.find('span', class_='not-movie').get_text()
                except:
                    not_movie = False

                try:
                    quality = div.find('div', class_='full').find('div', class_='quality').get_text().strip()
                except:
                    quality = ''

                dop_information = []
                try:
                    likes = soup.find(class_='like', attrs={'data-id' : movie_id}).find('span').get_text()
                    i_likes = int(likes)
                    if i_likes != 0:
                        if i_likes > 0:
                            likes = '[COLOR ff59C641]' + likes + '[/COLOR]'
                        else:
                            likes = '[COLOR ffDE4B64]' + likes + '[/COLOR]'
                        dop_information.append(likes)
                except:
                    pass

                try:
                    year = div.find('div', class_='item year').find('a').get_text().strip()
                    dop_information.append(year)
                except:
                    pass

                try:
                    genre = div.find('div', class_='category').find(class_='item-content').get_text().strip()
                    dop_information.append(genre)
                except:
                    print traceback.format_exc()

                information = ''
                if(len(dop_information) > 0):
                    information = '[COLOR white]['+', '.join(dop_information)+'][/COLOR]'

                movieposter = self.format_poster_link( href.find('img', class_='poster poster-tooltip').get('src') )

                result['data'].append({
                        'url': movie_url,
                        'id': movie_id,
                        'not_movie': not_movie,
                        'quality': self.format_quality(quality),
                        'year': information,
                        'name': movie_name.strip(),
                        'img': None if not movieposter else movieposter
                    })
        except:
            print traceback.format_exc()

        if(nocache):
            return None, result
        else:
            return cache_minutes, result

    def decode_base64(self, encoded_url):
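        # the site obfuscates links with a letter-substitution cipher on top
        # of base64: each codec_a character is swapped with its codec_b pair
        # (via a '___' placeholder so a swap is not immediately undone), then
        # the result is decoded as plain base64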
        codec_a = ("l", "u", "T", "D", "Q", "H", "0", "3", "G", "1", "f", "M", "p", "U", "a", "I", "6", "k", "d", "s", "b", "W", "5", "e", "y", "=")
        codec_b = ("w", "g", "i", "Z", "c", "R", "z", "v", "x", "n", "N", "2", "8", "J", "X", "t", "9", "V", "7", "4", "B", "m", "Y", "o", "L", "h")
        i = 0
        for a in codec_a:
            b = codec_b[i]
            i += 1
            encoded_url = encoded_url.replace(a, '___')
            encoded_url = encoded_url.replace(b, a)
            encoded_url = encoded_url.replace('___', b)
        return base64.b64decode(encoded_url)

    def decode_unicode(self, encoded_url):
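        # alternative obfuscation: after an optional leading '#', the payload
        # is a run of 3-hex-digit code points; each triple becomes a \u0XXX
        # escape and the whole string is unicode-unescaped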

        def grouper(n, iterable, fillvalue=None):
            "grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"
            args = [iter(iterable)] * n
            return izip_longest(fillvalue=fillvalue, *args)

        _ = (encoded_url[1:] if encoded_url.find('#') != -1 else encoded_url)
        tokens = map(lambda items: '\u0'+''.join(items), grouper(3, _))
        return ''.join(tokens).decode('unicode_escape')

    def decode_direct_media_url(self, encoded_url, checkhttp=False):
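        # with checkhttp=True, a payload that is already a plain http(s) URL
        # is rejected (returns False); only obfuscated payloads are decoded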
        if checkhttp and (encoded_url.find('http://') != -1 or encoded_url.find('https://') != -1):
            return False

        try:
            if encoded_url.find('#') != -1:
                return self.decode_unicode(encoded_url)
            else:
                return self.decode_base64(encoded_url)
        except:
            return False

    def format_poster_link(self, link):
        # fix for .cc
        r_link = link.replace('https://filmix.co', SITE_URL)
        # fix for .live / .co / .net: make relative links absolute
        return r_link if r_link.find(SITE_URL) != -1 else SITE_URL + r_link

    def format_direct_link(self, source_link, q):
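        # substitute the chosen quality for the "[q1,q2,...]" placeholder in
        # the source link; e.g. a link shaped like ".../[480,720]/video.mp4"
        # (assumed format) becomes ".../720/video.mp4"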
        regex = re.compile("\[([^\]]+)\]", re.IGNORECASE)
        return regex.sub(q, source_link)

    def get_qualitys(self, source_link):
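        # extract the comma-separated quality list from the bracket
        # placeholder; ['0'] means a single, unlabelled quality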
        try:
            avail_quality = re.compile("\[([^\]]+)\]", re.S).findall(source_link)[0]
            return avail_quality.split(',')
        except:
            return ['0']

    def get_collections_info(self):
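        # scrape the collections page into a list of {'url', 'img', 'title'} dicts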
        html = self.load(COLLECTIONS_URL)
        collectionsInfo = []
        
        html = html.encode('utf-8')
        soup = xbmcup.parser.html(self.strip_scripts(html))
        
        collections = soup.find_all('a', class_='poster-link poster-hover')
        for collection in collections:
            url_collection = collection.get('href').replace(SITE_URL,'')
            obj_poster = collection.find(class_ = 'poster')
            title_collection = obj_poster.get('alt')
            img_collection = self.format_poster_link( obj_poster.get('src') )
            if img_collection.find('/none.png') > 0: img_collection = cover.treetv
            
            collectionsInfo.append({'url': url_collection, 'img': img_collection, 'title': title_collection})

        return collectionsInfo

    def get_movie_info(self, url):
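        # scrape the movie page into movieInfo; the playable links come from
        # the site's /api/movies/player_data endpoint, one entry per translation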
        html = self.load(url)

        movieInfo = {}
        
        movieInfo['page_url'] = url
        
        movieInfo['no_files'] = None
        movieInfo['episodes'] = True
        movieInfo['movies'] = []
        movieInfo['resolutions'] = []

        if not html:
            movieInfo['no_files'] = 'HTTP error'
            return movieInfo

        html = html.encode('utf-8')
        soup = xbmcup.parser.html(self.strip_scripts(html))

        try:
            movieInfo['is_proplus'] = len(soup.find('span', class_='proplus'))
        except:
            movieInfo['is_proplus'] = 0

        # print self.strip_scripts(html)
        try:
            try:
                film_id = re.compile('film_id ?= ?([\d]+);', re.S).findall(html)[0].decode('string_escape').decode('utf-8')
                movieInfo['movie_id'] = int( film_id )
                js_string = self.ajax(SITE_URL+'/api/movies/player_data', {'post_id' : film_id}, url)
                # print js_string
                player_data =  json.loads(js_string, 'utf-8')
                # player_data = player_data['message']['translations']['flash']
                player_data = player_data['message']['translations']['html5']
                if player_data == []:
                    movieInfo['no_files'] = xbmcup.app.lang[34026].encode('utf8')
            except:
                movieInfo['no_files'] = xbmcup.app.lang[34026].encode('utf8')
                raise

            for translate in player_data:
                js_string = self.decode_direct_media_url(player_data[translate], True)
                if js_string is False:
                    continue
                if(js_string.find('.txt') != -1):
                    playlist = self.decode_direct_media_url(self.load(js_string))

                    movies = json.loads(playlist, 'utf-8')
                    # print movies
                    for season in movies['playlist']:
                        current_movie = {'folder_title' : season['comment']+' ('+translate+')', 'movies': {}, 'translate': translate}

                        for movie in season['playlist']:
                            avail_quality = self.get_qualitys(movie['file'])
                            for q in avail_quality:
                                if(q == ''): continue
                                direct_link = self.format_direct_link(movie['file'], q) if q != '0' else movie['file']
                                
                                try:
                                    iseason = int(movie['season'])
                                except:
                                    iseason = 0
                                
                                try:
                                    iserieId = int(movie['serieId'])
                                except:
                                    iserieId = 0

                                try:
                                    current_movie['movies'][q].append([direct_link, iseason, iserieId])
                                except:
                                    current_movie['movies'][q] = []
                                    current_movie['movies'][q].append([direct_link, iseason, iserieId])
                                current_movie['season'] = iseason

                        #for resulut in current_movie['movies']:
                        #    current_movie['movies'][resulut] = current_movie['movies'][resulut][0]

                        movieInfo['movies'].append(current_movie)

                elif(js_string.find('http://') != -1 or js_string.find('https://') != -1):
                    avail_quality = self.get_qualitys(js_string)
                    current_movie = {'folder_title': translate, 'translate': translate, 'movies': {}}
                    for q in avail_quality:
                        if(q == ''): continue
                        direct_link = self.format_direct_link(js_string, q) if q != '0' else js_string
                        try:
                            current_movie['movies'][q].append([direct_link, 1, 1])
                        except:
                            current_movie['movies'][q] = []
                            current_movie['movies'][q].append([direct_link, 1, 1])

                    movieInfo['movies'].append(current_movie)

            movieInfo['title'] = soup.find('h1', class_='name').get_text()
            try:
                movieInfo['originaltitle'] = soup.find('div', class_='origin-name').get_text().strip()
            except:
                movieInfo['originaltitle'] = ''

            try:
                r_kinopoisk = soup.find('span', class_='kinopoisk btn-tooltip icon-kinopoisk').find('p').get_text().strip()
                if float(r_kinopoisk) == 0: r_kinopoisk = ''
            except:
                r_kinopoisk = ''

            try:
                r_imdb = soup.find('span', class_='imdb btn-tooltip icon-imdb').find('p').get_text().strip()
                movieInfo['ratingValue'] = float(r_imdb)
                movieInfo['ratingCount'] = r_imdb
            except:
                r_imdb = ''
                movieInfo['ratingValue'] = 0
                movieInfo['ratingCount'] = 0

            if r_kinopoisk != '': r_kinopoisk = ' [COLOR orange]Кинопоиск[/COLOR] : '.decode('cp1251') + r_kinopoisk

            if movieInfo['ratingValue'] != 0:
                r_imdb = ' [COLOR yellow]IMDB[/COLOR] : ' + r_imdb
            else:
                r_imdb = ''

            s_rating = r_kinopoisk + r_imdb + ' \n '
            
            try:
                movieInfo['description'] = s_rating + soup.find('div', class_='full-story').get_text().strip()
            except:
                movieInfo['description'] = ''

            try:
                movieInfo['fanart'] = SITE_URL + soup.find('ul', class_='frames-list').find('a').get('href')
            except:
                movieInfo['fanart'] = ''
            try:
                movieInfo['cover'] = soup.find('a', class_='fancybox').get('href')
            except:
                movieInfo['cover'] = ''

            try:
                movieInfo['genres'] = []
                genres = soup.find_all(attrs={'itemprop' : 'genre'})
                for genre in genres:
                   movieInfo['genres'].append(genre.get_text().strip())
                movieInfo['genres'] = ' / '.join(movieInfo['genres']).encode('utf-8')
            except:
                movieInfo['genres'] = ''

            try:
                movieInfo['year'] = soup.find('div', class_='item year').find('a').get_text()
            except:
                movieInfo['year'] = ''

            try:
                movieInfo['durarion'] = soup.find('div', class_='item durarion').get('content')
                movieInfo['durarion'] = int(movieInfo['durarion'])*60
            except:
                movieInfo['durarion'] = ''

            movieInfo['is_serial'] = soup.find('div', class_='item xfgiven_added') is not None

            # try:
                # movieInfo['ratingValue'] = float(soup.find(attrs={'itemprop' : 'ratingValue'}).get_text())
            # except:
                # movieInfo['ratingValue'] = 0

            # try:
                # movieInfo['ratingCount'] = int(soup.find(attrs={'itemprop' : 'ratingCount'}).get_text())
            # except:
                # movieInfo['ratingCount'] = 0

            try:
                movieInfo['director'] = []
                directors = soup.find('div', class_='item directors').findAll('a')
                for director in directors:
                   movieInfo['director'].append(director.get_text().strip())
                movieInfo['director'] = ', '.join(movieInfo['director']).encode('utf-8')
            except:
                movieInfo['director'] = ''
        except:
            print traceback.format_exc()

        #print movieInfo

        return movieInfo

    def get_modal_info(self, url):
        html = self.load(url)
        movieInfo = {}
        movieInfo['error'] = False
        if not html:
            movieInfo['error'] = True
            return movieInfo

        html = html.encode('utf-8')
        soup = xbmcup.parser.html(self.strip_scripts(html))

        try:
            movieInfo['desc'] = soup.find('div', class_='full-story').get_text().strip()
        except:
            movieInfo['desc'] = ''

        try:
            movieInfo['title'] = soup.find('h1', class_='name').get_text()
        except:
            movieInfo['title'] = ''

        try:
            movieInfo['originaltitle'] = soup.find('div', class_='origin-name').get_text().strip()
        except:
            movieInfo['originaltitle'] = ''

        if(movieInfo['originaltitle'] != ''):
             movieInfo['title'] = '%s / %s' % (movieInfo['title'],  movieInfo['originaltitle'])

        try:
            movieInfo['poster'] = self.format_poster_link( soup.find('img', class_='poster poster-tooltip').get('src') )
        except:
            movieInfo['poster'] = ''

        movieInfo['desc'] = ''
        try:
            infos = soup.find('div', class_='full min').find_all('div', class_="item")
            skip = True
            for div in infos:
                if(skip):
                    skip = False
                    continue
                movieInfo['desc'] += self.format_desc_item(div.get_text().strip())+"\n"
        except:
           movieInfo['desc'] = traceback.format_exc()

        try:
            div = soup.find('div', class_='full-panel').find('span', class_='kinopoisk')
            rvalue = div.find('div', attrs={'itemprop' : 'ratingValue'}).get_text().strip()
            rcount = div.find('div', attrs={'itemprop' : 'ratingCount'}).get_text().strip()
            kp = xbmcup.app.lang[34029] % (self.format_rating(rvalue), rvalue, rcount)
            movieInfo['desc'] += kp+"\n"
        except:
            pass

        try:
            div = soup.find('div', class_='full-panel').find('span', class_='imdb').find_all('div')
            rvalue = div[0].get_text().strip()
            rcount = div[1].get_text().strip()
            kp = xbmcup.app.lang[34030] % (self.format_rating(rvalue), rvalue, rcount)
            movieInfo['desc'] += kp+"\n"
        except:
            pass

        try:
            desc = soup.find('div', class_='full-story').get_text().strip()
            movieInfo['desc'] = '\n[COLOR blue]%s[/COLOR]\n%s' % (xbmcup.app.lang[34027], desc) + '\n' + movieInfo['desc']
        except:
            movieInfo['desc'] = traceback.format_exc()

        try:
            movieInfo['trailer'] = soup.find('li', attrs={'data-id' : "trailers"}).find('a').get('href')
        except:
            movieInfo['trailer'] = False

        return movieInfo

    def my_int(self, s):
        # int() that tolerates the empty string
        if s == '':
            return 0
        return int(s)

    def get_trailer(self, url):
        progress = xbmcgui.DialogProgress()
        progress.create(xbmcup.app.addon['name'])
        progress.update(0)
        html = self.load(url)
        movieInfo = {}
        movieInfo['error'] = False
        if not html:
            xbmcup.gui.message(xbmcup.app.lang[34031].encode('utf8'))
            progress.update(0)
            progress.close()
            return False

        progress.update(50)
        html = html.encode('utf-8')
        soup = xbmcup.parser.html(self.strip_scripts(html))

        link = self.decode_direct_media_url(soup.find('input', id='video5-link').get('value'))
        avail_quality = max(map(self.my_int, self.get_qualitys(link)))
        progress.update(100)
        progress.close()
        return self.format_direct_link(link, str(avail_quality))

    def format_desc_item(self, text):
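        # wrap the leading "Label:" of an info line in a blue [COLOR] tag
        # and collapse runs of whitespace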
        return re.compile(r'^([^:]+:)', re.S).sub('[COLOR blue]\\1[/COLOR] ', re.sub(r'\s+', ' ', text) )


    def strip_scripts(self, html):
        # drop the <head> contents and then every <script> block so the HTML
        # parser does not trip over markup embedded in inline JS
        html = re.compile(r'<head[^>]*>(.*?)</head>', re.S).sub('<head></head>', html)
        return re.compile(r'<script[^>]*>(.*?)</script>', re.S).sub('', html)

    def format_rating(self, rating):
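        # pick a Kodi colour for a rating: green above 7, amber above 4,
        # red otherwise, white when unrated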
        rating = float(rating)
        if(rating == 0):
            return 'white'
        elif(rating > 7):
            return 'ff59C641'
        elif(rating > 4):
            return 'ffFFB119'
        else:
            return 'ffDE4B64'


    def format_quality(self, quality):
        if(quality == ''): return ''
        if(quality.find('1080') != -1):
            q = 'HD'
        elif(quality.find('720') != -1):
            q = 'HQ'
        elif(quality.find('480') != -1):
            q = 'SQ'
        else:
            q = 'LQ'

        qualitys = {'HD' : 'ff3BADEE', 'HQ' : 'ff59C641', 'SQ' : 'ffFFB119', 'LQ' : 'ffDE4B64'}
        if(q in qualitys):
            return "[COLOR %s][%s][/COLOR]" % (qualitys[q], quality)
        return ("[COLOR ffDE4B64][%s][/COLOR]" % quality if quality != '' else '')


    def get_page(self, soup):
        info = {'pagenum' : 0, 'maxpage' : 0}
        try:
            wrap  = soup.find('div', class_='navigation')
            info['pagenum'] = int(wrap.find('span', class_='').get_text())
            try:
                info['maxpage'] = len(wrap.find('a', class_='next'))
                if(info['maxpage'] > 0):
                    info['maxpage'] = info['pagenum']+1
            except:
                info['maxpage'] = info['pagenum']
                print traceback.format_exc()

        except:
            info['pagenum'] = 1
            info['maxpage'] = 1
            print traceback.format_exc()

        return info


    def get_page_search(self, soup):
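        # the search paginator marks the current page with a class-less <span>
        # and clickable pages with <span class="click">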
        info = {'pagenum' : 0, 'maxpage' : 0}
        try:
            wrap  = soup.find('div', class_='navigation')
            current_page = wrap.find_all('span', class_='')
            info['pagenum'] = 1
            for cpage in current_page:
                if(cpage.get_text().find('...') == -1):
                    info['pagenum'] = int(cpage.get_text())
                    break

            try:
                clicks = wrap.find_all('span', class_='click')
                pages = []
                for page in clicks:
                    pages.append(int(page.get_text()))

                info['maxpage'] = max(pages)
            except:
                info['maxpage'] = info['pagenum']
                print traceback.format_exc()

        except:
            info['pagenum'] = 1
            info['maxpage'] = 1
            print traceback.format_exc()

        return info

    def get_movie_id(self, url):
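        # pull the numeric id out of a "/12345-title" style URL, 0 if absent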
        result = re.findall(r'\/([\d]+)\-', url)
        
        try:
            result = int(result[0])
        except:
            result = 0
            
        return result
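
# === Example #3 ===
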
class HttpData:

    def load(self, url):
        try:
            self.auth = Auth()
            self.cookie = self.auth.get_cookies()
            response = xbmcup.net.http.get(url, cookies=self.cookie)
        except xbmcup.net.http.exceptions.RequestException:
            print traceback.format_exc()
            return None
        else:
            if response.status_code == 200:
                if not self.auth.check_auth(response.text):
                    self.auth.autorize()
                return response.text
            return None

    def post(self, url, data=None):
        # default to an empty payload; the old `try: data` probe was a no-op
        if data is None:
            data = {}
        try:
            self.auth = Auth()
            self.cookie = self.auth.get_cookies()
            response = xbmcup.net.http.post(url, data, cookies=self.cookie)
        except xbmcup.net.http.exceptions.RequestException:
            print traceback.format_exc()
            return None
        else:
            if response.status_code == 200:
                if not self.auth.check_auth(response.text):
                    self.auth.autorize()
                return response.text
            return None


    def ajax(self, url, data=None):
        # data=None instead of data={} avoids the shared-mutable-default trap
        if data is None:
            data = {}
        try:
            self.auth = Auth()
            self.cookie = self.auth.get_cookies()
            headers = {
                'X-Requested-With' : 'XMLHttpRequest'
            }
            if len(data) > 0:
                response = xbmcup.net.http.post(url, data, cookies=self.cookie, headers=headers)
            else:
                response = xbmcup.net.http.get(url, cookies=self.cookie, headers=headers)
        except xbmcup.net.http.exceptions.RequestException:
            print traceback.format_exc()
            return None
        else:
            return response.text if response.status_code == 200 else None

    def get_movies(self, url, page, idname='dle-content', nocache=False, search="", itemclassname="shortstory"):
        page = int(page)

        if(page > 0 and search == ''):
            url = SITE_URL+"/"+url.strip('/')+"/page/"+str(page+1)
        else:
            url = SITE_URL+"/"+url.strip('/')

        if(search != ''):
            html = self.ajax(url)
        else:
            html = self.load(url)

        print url

        if not html:
            return None, {'page': {'pagenum' : 0, 'maxpage' : 0}, 'data': []}
        result = {'page': {}, 'data': []}
        soup = xbmcup.parser.html(self.strip_scripts(html))
        if(search != ''):
            result['page'] = self.get_page_search(soup)
        else:
            result['page'] = self.get_page(soup)
        if(idname != ''):
            center_menu = soup.find('div', id=idname)
        else:
            center_menu = soup
        try:
            for div in center_menu.find_all('article', class_=itemclassname):
                href = div.find('div', class_='short')#.find('a')

                not_movie = True
                try:
                    not_movie_test = div.find('span', class_='not-movie').get_text()
                except:
                    not_movie = False

                try:
                    quality = div.find('div', class_='rip').find('span', class_='added-info').get_text().strip()
                except:
                    quality = ''

                dop_information = []
                try:
                    year = div.find('div', class_='item year').find('a').get_text().strip()
                    dop_information.append(year)
                except:
                    pass

                try:
                    genre = div.find('div', class_='category').find(class_='added-info').get_text().strip()
                    dop_information.append(genre)
                except:
                    print traceback.format_exc()

                information = ''
                if(len(dop_information) > 0):
                    information = '[COLOR white]['+', '.join(dop_information)+'][/COLOR]'

                movieposter = SITE_URL+href.find('img', class_='poster').get('src')

                movie_url = href.find('a').get('href')
                movie_id = re.compile('/([\d]+)-', re.S).findall(movie_url)[0]

                result['data'].append({
                        'url': movie_url,
                        'id': movie_id,
                        'not_movie': not_movie,
                        'quality': self.format_quality(quality),
                        'year': information,
                        'name': href.find('div', class_='name').get_text().strip(),
                        'img': None if not movieposter else movieposter
                    })
        except:
            print traceback.format_exc()

        if(nocache):
            return None, result
        else:
            return cache_minutes, result

    def decode_direct_media_url(self, encoded_url):
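        # same letter-substitution-plus-base64 scheme as decode_base64 in the
        # previous example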
        codec_a = ("l", "u", "T", "D", "Q", "H", "0", "3", "G", "1", "f", "M", "p", "U", "a", "I", "6", "k", "d", "s", "b", "W", "5", "e", "y", "=")
        codec_b = ("w", "g", "i", "Z", "c", "R", "z", "v", "x", "n", "N", "2", "8", "J", "X", "t", "9", "V", "7", "4", "B", "m", "Y", "o", "L", "h")
        i = 0
        for a in codec_a:
            b = codec_b[i]
            i += 1
            encoded_url = encoded_url.replace(a, '___')
            encoded_url = encoded_url.replace(b, a)
            encoded_url = encoded_url.replace('___', b)

        return base64.b64decode(encoded_url)

    def format_direct_link(self, source_link, q):
        regex = re.compile("\[([^\]]+)\]", re.IGNORECASE)
        return regex.sub(q, source_link)

    def get_qualitys(self, source_link):
        try:
            avail_quality = re.compile("\[([^\]]+)\]", re.S).findall(source_link)[0]
            return avail_quality.split(',')
        except:
            return ['0']

    def get_movie_info(self, url):
        html = self.load(url)

        movieInfo = {}
        movieInfo['no_files'] = None
        movieInfo['episodes'] = True
        movieInfo['movies'] = []
        movieInfo['resolutions'] = []

        if not html:
            movieInfo['no_files'] = 'HTTP error'
            return movieInfo

        html = html.encode('utf-8')
        soup = xbmcup.parser.html(self.strip_scripts(html))

        # print self.strip_scripts(html)

        try:
            try:
                js_string = self.decode_direct_media_url(re.compile("videoLink = '([^\']+)';", re.S).findall(html)[0].decode('string_escape').decode('utf-8'))
            except:
                try:
                    js_string = self.decode_direct_media_url(re.compile("plLink = '([^\']+)';", re.S).findall(html)[0].decode('string_escape').decode('utf-8'))
                except:
                    movieInfo['no_files'] = xbmcup.app.lang[34026].encode('utf8')
                    raise

            if(js_string.find('.txt') != -1):
                playlist = self.decode_direct_media_url(self.load(js_string))

                movies = json.loads(playlist, 'utf-8')
                for season in movies['playlist']:
                    current_movie = {'folder_title' : season['comment'], 'movies': {}}

                    for movie in season['playlist']:
                        avail_quality = self.get_qualitys(movie['file'])
                        for q in avail_quality:
                            if(q == ''): continue
                            direct_link = self.format_direct_link(movie['file'], q) if q != '0' else movie['file']
                            try:
                                current_movie['movies'][q].append(direct_link)
                            except:
                                current_movie['movies'][q] = []
                                current_movie['movies'][q].append(direct_link)


                    #for resulut in current_movie['movies']:
                    #    current_movie['movies'][resulut] = current_movie['movies'][resulut][0]

                    movieInfo['movies'].append(current_movie)

            elif(js_string.find('http://') != -1):
                avail_quality = self.get_qualitys(js_string)
                current_movie = {'folder_title' : '1', 'movies': {}}
                for q in avail_quality:
                    if(q == ''): continue
                    direct_link = self.format_direct_link(js_string, q) if q != '0' else js_string
                    try:
                        current_movie['movies'][q].append(direct_link)
                    except:
                        current_movie['movies'][q] = []
                        current_movie['movies'][q].append(direct_link)

                movieInfo['movies'].append(current_movie)

            movieInfo['title'] = soup.find('h1', class_='name').get_text()
            try:
                movieInfo['originaltitle'] = soup.find('div', class_='origin-name').get_text().strip()
            except:
                movieInfo['originaltitle'] = ''

            try:
                movieInfo['description'] = soup.find('div', class_='full-story').get_text().strip()
            except:
                movieInfo['description'] = ''

            try:
                movieInfo['fanart'] = SITE_URL+soup.find('div', class_='screen_bg').find('a').get('href')
            except:
                movieInfo['fanart'] = ''
            try:
                movieInfo['cover'] = SITE_URL+soup.find('img', class_='poster').get('src')
            except:
                movieInfo['cover'] = ''
            try:
                movieInfo['genres'] = []
                genres = soup.find('div', class_='category').findAll('a')
                for genre in genres:
                   movieInfo['genres'].append(genre.get_text().strip())
                movieInfo['genres'] = ' / '.join(movieInfo['genres']).encode('utf-8')
            except:
                movieInfo['genres'] = ''

            try:
                movieInfo['year'] = soup.find('div', class_='year').find('a').get_text()
            except:
                movieInfo['year'] = ''

            try:
                movieInfo['durarion'] = soup.find('div', class_='durarion').get('content')
            except:
                movieInfo['durarion'] = ''

            try:
                movieInfo['ratingValue'] = float(soup.find(attrs={'itemprop' : 'ratingValue'}).get_text())
            except:
                movieInfo['ratingValue'] = 0

            try:
                movieInfo['ratingCount'] = int(soup.find(attrs={'itemprop' : 'ratingCount'}).get_text())
            except:
                movieInfo['ratingCount'] = 0

            try:
                movieInfo['director'] = []
                directors = soup.find('div', class_='directors').findAll('a')
                for director in directors:
                   movieInfo['director'].append(director.get_text().strip())
                movieInfo['director'] = ', '.join(movieInfo['director']).encode('utf-8')
            except:
                movieInfo['director'] = ''
        except:
            print traceback.format_exc()
        return movieInfo

    def get_modal_info(self, url):
        html = self.load(url)
        movieInfo = {}
        movieInfo['error'] = False
        if not html:
            movieInfo['error'] = True
            return movieInfo

        html = html.encode('utf-8')
        soup = xbmcup.parser.html(self.strip_scripts(html))

        try:
            movieInfo['desc'] = soup.find('div', class_='full-story').get_text().strip()
        except:
            movieInfo['desc'] = ''

        try:
            movieInfo['title'] = soup.find('h1', class_='name').get_text()
        except:
            movieInfo['title'] = ''

        try:
            movieInfo['originaltitle'] = soup.find('div', class_='origin-name').get_text().strip()
        except:
            movieInfo['originaltitle'] = ''

        if(movieInfo['originaltitle'] != ''):
             movieInfo['title'] = '%s / %s' % (movieInfo['title'],  movieInfo['originaltitle'])

        try:
            movieInfo['poster'] = SITE_URL+soup.find('img', class_='poster').get('src')
        except:
            movieInfo['poster'] = ''

        movieInfo['desc'] = ''
        try:
            infos = soup.find('div', class_='full min').find_all('div', class_="item")
            skip = True
            for div in infos:
                if(skip):
                    skip = False
                    continue
                movieInfo['desc'] += self.format_desc_item(div.get_text().strip())+"\n"
        except:
           movieInfo['desc'] = traceback.format_exc()

        try:
            div = soup.find('div', class_='full-panel').find('span', class_='kinopoisk')
            rvalue = div.find('div', attrs={'itemprop' : 'ratingValue'}).get_text().strip()
            rcount = div.find('div', attrs={'itemprop' : 'ratingCount'}).get_text().strip()
            kp = xbmcup.app.lang[34029] % (self.format_rating(rvalue), rvalue, rcount)
            movieInfo['desc'] += kp+"\n"
        except:
            pass

        try:
            div = soup.find('div', class_='full-panel').find('span', class_='imdb').find_all('div')
            rvalue = div[0].get_text().strip()
            rcount = div[1].get_text().strip()
            kp = xbmcup.app.lang[34030] % (self.format_rating(rvalue), rvalue, rcount)
            movieInfo['desc'] += kp+"\n"
        except:
            pass

        try:
            desc = soup.find('div', class_='full-story').get_text().strip()
            movieInfo['desc'] += '\n[COLOR blue]%s[/COLOR]\n%s' % (xbmcup.app.lang[34027], desc)
        except:
            movieInfo['desc'] = traceback.format_exc()

        try:
            movieInfo['trailer'] = soup.find('li', attrs={'data-id' : "trailers"}).find('a').get('href')
        except:
            movieInfo['trailer'] = False

        return movieInfo

    def my_int(self, s):
        # int() that tolerates the empty string
        if s == '':
            return 0
        return int(s)

    def get_trailer(self, url):
        progress = xbmcgui.DialogProgress()
        progress.create(xbmcup.app.addon['name'])
        progress.update(0)
        html = self.load(url)
        movieInfo = {}
        movieInfo['error'] = False
        if not html:
            xbmcup.gui.message(xbmcup.app.lang[34031].encode('utf8'))
            progress.update(0)
            progress.close()
            return False

        progress.update(50)
        html = html.encode('utf-8')
        soup = xbmcup.parser.html(self.strip_scripts(html))

        link = self.decode_direct_media_url(soup.find('input', id='video-link').get('value'))
        avail_quality = max(map(self.my_int, self.get_qualitys(link)))
        progress.update(100)
        progress.close()
        return self.format_direct_link(link, str(avail_quality))

    def format_desc_item(self, text):
        return re.compile(r'^([^:]+:)', re.S).sub('[COLOR blue]\\1[/COLOR] ', text)


    def strip_scripts(self, html):
        # drop the <head> contents and then every <script> block so the HTML
        # parser does not trip over markup embedded in inline JS
        html = re.compile(r'<head[^>]*>(.*?)</head>', re.S).sub('<head></head>', html)
        return re.compile(r'<script[^>]*>(.*?)</script>', re.S).sub('', html)

    def format_rating(self, rating):
        rating = float(rating)
        if(rating == 0):
            return 'white'
        elif(rating > 7):
            return 'ff59C641'
        elif(rating > 4):
            return 'ffFFB119'
        else:
            return 'ffDE4B64'


    def format_quality(self, quality):
        if(quality == ''): return ''
        if(quality.find('1080') != -1):
            q = 'HD'
        elif(quality.find('720') != -1):
            q = 'HQ'
        elif(quality.find('480') != -1):
            q = 'SQ'
        else:
            q = 'LQ'

        qualitys = {'HD' : 'ff3BADEE', 'HQ' : 'ff59C641', 'SQ' : 'ffFFB119', 'LQ' : 'ffDE4B64'}
        if(q in qualitys):
            return "[COLOR %s][%s][/COLOR]" % (qualitys[q], quality)
        return ("[COLOR ffDE4B64][%s][/COLOR]" % quality if quality != '' else '')


    def get_page(self, soup):
        info = {'pagenum' : 0, 'maxpage' : 0}
        try:
            wrap  = soup.find('div', class_='navigation')
            info['pagenum'] = int(wrap.find('span', class_='').get_text())
            try:
                info['maxpage'] = len(wrap.find('a', class_='next'))
                if(info['maxpage'] > 0):
                    info['maxpage'] = info['pagenum']+1
            except:
                info['maxpage'] = info['pagenum']
                print traceback.format_exc()

        except:
            info['pagenum'] = 1
            info['maxpage'] = 1
            print traceback.format_exc()

        return info


    def get_page_search(self, soup):
        info = {'pagenum' : 0, 'maxpage' : 0}
        try:
            wrap  = soup.find('div', class_='navigation')
            current_page = wrap.find_all('span', class_='')
            info['pagenum'] = 1
            for cpage in current_page:
                if(cpage.get_text().find('...') == -1):
                    info['pagenum'] = int(cpage.get_text())
                    break

            try:
                clicks = wrap.find_all('span', class_='click')
                pages = []
                for page in clicks:
                    pages.append(int(page.get_text()))

                info['maxpage'] = max(pages)
            except:
                info['maxpage'] = info['pagenum']
                print traceback.format_exc()

        except:
            info['pagenum'] = 1
            info['maxpage'] = 1
            print traceback.format_exc()

        return info

# === Example #4 ===

class HttpData:
    def load(self, url):
        try:
            self.auth = Auth()
            self.cookie = self.auth.get_cookies()
            headers = {'Referer': url}
            response = xbmcup.net.http.get(url,
                                           cookies=self.cookie,
                                           headers=headers)
        except xbmcup.net.http.exceptions.RequestException:
            print traceback.format_exc()
            return None
        else:
            if (response.status_code == 200):
                if (self.auth.check_auth(response.text) == False):
                    self.auth.autorize()
                return response.text
            return None

    def post(self, url, data=None):
        if data is None:
            data = {}
        try:
            self.auth = Auth()
            self.cookie = self.auth.get_cookies()
            headers = {'Referer': url}
            response = xbmcup.net.http.post(url,
                                            data,
                                            cookies=self.cookie,
                                            headers=headers)
        except xbmcup.net.http.exceptions.RequestException:
            print traceback.format_exc()
            return None
        else:
            if (response.status_code == 200):
                if (self.auth.check_auth(response.text) == False):
                    self.auth.autorize()
                return response.text
            return None

    def ajax(self, url):
        try:
            self.auth = Auth()
            self.cookie = self.auth.get_cookies()
            headers = {
                'X-Requested-With': 'XMLHttpRequest',
                'Referer': SITE_URL
            }
            response = xbmcup.net.http.get(url,
                                           cookies=self.cookie,
                                           headers=headers)
            print url
        except xbmcup.net.http.exceptions.RequestException:
            print traceback.format_exc()
            return None
        else:
            return response.text if response.status_code == 200 else None

    def get_movies(self,
                   url,
                   page,
                   classname='main_content_item',
                   nocache=False,
                   search="",
                   itemclassname="item"):
        page = int(page)
        if (page > 0):
            url = SITE_URL + "/" + url.strip('/') + "/page/" + str(page + 1)
        else:
            url = SITE_URL + "/" + url.strip('/')
        print url

        if (search != '' and page == 0):
            html = self.post(url, {'usersearch': search, 'filter': 'all'})
        else:
            html = self.load(url)

        #print html.encode('utf-8')

        if not html:
            return None, {'page': {'pagenum': 0, 'maxpage': 0}, 'data': []}
        result = {'page': {}, 'data': []}
        soup = xbmcup.parser.html(self.strip_scripts(html))
        #print soup
        result['page'] = self.get_page(soup)
        center_menu = soup.find('div', class_=classname)
        # print center_menu
        try:
            for div in center_menu.find_all('div', class_=itemclassname):
                if (search != ''):
                    href = None
                else:
                    href = div.find('h2').find('a')
                try:
                    quality = div.find(
                        'span',
                        class_='quality_film_title').get_text().strip()
                except:
                    quality = ''

                dop_information = []
                try:
                    if (itemclassname == 'item_wrap'):
                        year = div.find(
                            'a', class_='fast_search').get_text().strip()
                    else:
                        year = div.find(
                            'div', class_='smoll_year').get_text().strip()
                    dop_information.append(year)
                except:
                    pass

                try:
                    if (itemclassname == 'item_wrap'):
                        genre = div.find(
                            'span',
                            class_='section_item_list').get_text().strip()
                    else:
                        genre = div.find(
                            'div', class_='smoll_janr').get_text().strip()
                    dop_information.append(genre)
                except:
                    pass

                information = ''
                if (len(dop_information) > 0):
                    information = '[COLOR white][' + ', '.join(
                        dop_information) + '][/COLOR]'

                posters = div.find('div', class_='preview').find_all('img')

                movieposter = None
                for img in posters:
                    img_src = img.get('src')
                    if (img_src.find('http') != -1):
                        movieposter = img_src
                        if (search != ''):
                            href = img.parent
                        break

                if href is None:
                    # nothing usable was found; bail out to the outer except
                    raise Exception('item link not found')

                # workaround for the bookmarks page
                if (classname == 'book_mark_content'):
                    try:
                        movieposter = SITE_URL + posters[0].get('src')
                    except:
                        pass

                if (search != ''):
                    name = href.find('img').get('alt').strip()
                else:
                    name = href.get_text().strip()

                # the trailing comma makes movie_url a 1-tuple, hence the
                # movie_url[0] indexing below
                movie_url = href.get('href'),
                movie_id = re.compile('/film/([\d]+)-',
                                      re.S).findall(movie_url[0])[0]

                result['data'].append({
                    'url': movie_url,
                    'id': movie_id,
                    'quality': self.format_quality(quality),
                    'year': information,
                    'name': name,
                    'img': None if not movieposter else movieposter
                })

            #print result['data']
        except:
            print traceback.format_exc()

        if (nocache):
            return None, result
        else:
            return cache_minutes, result

    def get_movie_info(self, url):

        movieInfo = {}
        movieInfo['no_files'] = None
        movieInfo['episodes'] = True
        movieInfo['movies'] = []
        movieInfo['resolutions'] = []
        movieInfo['page_url'] = url[0]

        url = SITE_URL + url[0]
        html = self.load(url)
        print url.encode('utf-8')

        if not html:
            movieInfo['no_files'] = 'HTTP error'
            return movieInfo

        html = html.encode('utf-8')
        soup = xbmcup.parser.html(self.strip_scripts(html))

        folders = soup.find('div', id='accordion_wrap').findAll('div', class_='accordion_item')
        #folders = soup.find('div', id='accordion_wrap').findAll('div', class_='folder_name')

        avalible_res = soup.find('div', id='film_object_params').find('span', class_='film_q_img').get_text()

        # propped up with a kludge, long may it hold
        quality_matrix = {
            'HD': ['360', '480', '720', '1080'],
            'HQ': ['360', '480', '720'],
            'SQ': ['360', '480'],
            'LQ': ['360']
        }
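        # e.g. a film whose best available quality is 'SQ' is fetched in the
        # 360p and 480p variants only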

        if (avalible_res == None or avalible_res not in quality_matrix):
            avalible_res = 'HD'

        movies = {}
        for fwrap in folders:
            folder_id = fwrap.find('div',
                                   class_='folder_name').get('data-folder')
            movies[folder_id] = {}
            folder_items = fwrap.findAll('div', class_='film_title_link')
            for q in quality_matrix[avalible_res]:
                for item in folder_items:
                    movie_data = [
                        item.find('a').get_text().encode('utf-8'),
                        item.find('a').get('data-href')
                    ]
                    try:
                        movies[folder_id][q].append(movie_data)
                    except:
                        movies[folder_id][q] = []
                        movies[folder_id][q].append(movie_data)

        #print movies

        #js_string = re.compile("'source'\s*:\s*\$\.parseJSON\('([^\']+)'\)", re.S).findall(html)[0].decode('string_escape').decode('utf-8')
        #movies = json.loads(js_string, 'utf-8')
        #print movies
        if (movies != None and len(movies) > 0):
            for window_id in movies:
                current_movie = {'folder_title': '', 'movies': {}}
                try:
                    current_movie['folder_title'] = soup.find('div', {'data-folder': str(window_id)}).find('a')\
                        .get('title').encode('utf-8')
                except:
                    current_movie['folder_title'] = xbmcup.app.lang[30113]

                sort_movies = sorted(movies[window_id].items(),
                                     key=lambda (k, v): int(k))
                for movie in sort_movies:
                    try:
                        current_movie['movies'][movie[0]].append(movie[1])
                    except:
                        current_movie['movies'][movie[0]] = []
                        current_movie['movies'][movie[0]].append(movie[1])

                for resulut in current_movie['movies']:
                    current_movie['movies'][resulut] = current_movie['movies'][
                        resulut][0]
                    # if(len(current_movie['movies'][resulut]) > 1):
                    #     movieInfo['episodes'] = True

                movieInfo['movies'].append(current_movie)

            # movieInfo['movies'] = movies

            movieInfo['title'] = soup.find('h1',
                                           id='film_object_name').get_text()
            try:
                movieInfo['description'] = soup.find(
                    'div', class_='description').get_text().strip()
            except:
                movieInfo['description'] = ''

            try:
                movieInfo['fanart'] = SITE_URL + soup.find(
                    'div', class_='screen_bg').find('a').get('href')
            except:
                movieInfo['fanart'] = ''
            try:
                movieInfo['cover'] = soup.find('img',
                                               id='preview_img').get('src')
            except:
                movieInfo['cover'] = ''
            try:
                movieInfo['genres'] = []
                genres = soup.find('div', class_='list_janr').findAll('a')
                for genre in genres:
                    movieInfo['genres'].append(genre.get_text().strip())
                movieInfo['genres'] = ' / '.join(
                    movieInfo['genres']).encode('utf-8')
            except:
                movieInfo['genres'] = ''

            try:
                results = soup.findAll('a', class_='fast_search')
                movieInfo['year'] = self.get_year(results)
            except:
                movieInfo['year'] = ''
            try:
                movieInfo['director'] = soup.find(
                    'span', class_='regiser_item').get_text().encode('utf-8')
            except:
                movieInfo['director'] = ''
        else:
            try:
                no_files = soup.find(
                    'div',
                    class_='no_files').get_text().strip().encode('utf-8')
            except:
                no_files = 'Что-то пошло не так...'  # "Something went wrong..."

            movieInfo['no_files'] = no_files

        return movieInfo

    def get_collections(self):
        url = SITE_URL + "/collection"
        html = self.load(url)
        if not html:
            return None, {'page': {'pagenum': 0, 'maxpage': 10}, 'data': []}
        html = html.encode('utf-8')
        result = {'page': {}, 'data': []}
        soup = xbmcup.parser.html(self.strip_scripts(html))
        wrap = soup.find('div', class_='main_content_item')
        try:
            for div in wrap.find_all('div', class_='item'):
                try:
                    preview_img = div.find(
                        'div', class_='preview').find('img').get('src')
                except:
                    preview_img = ''

                try:
                    movie_count = div.find(
                        'div',
                        class_='item_content').find('span').get_text().strip()
                except:
                    movie_count = ''

                try:
                    href = div.find('div', class_='item_content').find('a')
                    name = href.get_text().strip() + (
                        ' (%s)' % movie_count if movie_count != '' else '')
                    href = href.get('href')
                except:
                    name = ''
                    href = ''

                result['data'].append({
                    'url': href,
                    'name': name,
                    'img': None if not preview_img else (SITE_URL + preview_img)
                })

        except:
            print traceback.format_exc()

        return cache_minutes, result

    def get_bookmarks(self):
        url = "%s/users/profile/bookmark" % SITE_URL

        #self.ajax('%s/users/profile/addbookmark?name=%s' % (SITE_URL, BOOKMARK_DIR))

        html = self.load(url)
        if not html:
            return None, {'page': {'pagenum': 0, 'maxpage': 0}, 'data': []}
        html = html.encode('utf-8')
        result = {'page': {}, 'data': []}
        soup = xbmcup.parser.html(self.strip_scripts(html))
        wrap = soup.find('div', id='bookmark_list')

        try:
            for div in wrap.find_all('a'):
                try:
                    href = div.get('data-rel')
                    name = div.get_text().strip()
                except:
                    name = ''
                    href = ''

                result['data'].append({
                    'url': href,
                    'name': name,
                    'img': cover.treetv
                })

        except:
            print traceback.format_exc()

        return None, result

    def get_year(self, results):
        for res in results:
            if (res.get('data-rel') == 'year1'):
                return res.get_text().encode('utf-8')
        return 0

    def strip_scripts(self, html):
        # remove all <script></script> tags together with their contents,
        # so the HTML parser does not choke on markup inside inline JS

        # normalize a double quote squeezed between alphanumerics to a single
        # quote, e.g. a"b -> a'b
        html = re.compile(r'([a-zA-Z0-9]{1,1})"([a-zA-Z0-9]{1,1})').sub(
            "\\1'\\2", html)
        html = re.compile(r'<script[^>]*>(.*?)</script>', re.S).sub('', html)
        html = re.compile(r'</script>', re.S).sub('', html)
        # drop stray alt="/title=" fragments that break attribute parsing
        html = re.compile(r'alt="(>+|src=")', re.S).sub('\\1', html)
        html = re.compile(r'title="(>+|src=")', re.S).sub('\\1', html)
        #print html.encode('utf-8')
        return html
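    # example (hypothetical): strip_scripts('he said a"b <script>x=1</script>ok')
    # returns "he said a'b ok"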

    def format_quality(self, quality):
        qualitys = {
            'HD': 'ff3BADEE',
            'HQ': 'ff59C641',
            'SQ': 'ffFFB119',
            'LQ': 'ffDE4B64'
        }
        if (quality in qualitys):
            return "[COLOR %s][%s][/COLOR]" % (qualitys[quality], quality)
        return ("[COLOR ffDE4B64][%s][/COLOR]" %
                quality if quality != '' else '')

    def get_page(self, soup):
        info = {'pagenum': 0, 'maxpage': 0}
        try:
            try:
                wrap = soup.find('div', id='main_paginator')
                wrap.find('b')
            except:
                wrap = soup.find('div', class_='paginationControl')

            info['pagenum'] = int(
                wrap.find('a', class_="active").get_text().encode('utf-8'))
            try:
                info['maxpage'] = int(
                    wrap.find('a', class_='last').get('data-rel'))
            except:
                try:
                    try:
                        info['maxpage'] = int(
                            os.path.basename(
                                wrap.find('a', class_='next').get('href')))
                    except:
                        info['maxpage'] = wrap.find(
                            'a', class_='next').get('data-rel')
                except:
                    info['maxpage'] = info['pagenum']
        except:
            info['pagenum'] = 1
            info['maxpage'] = 1
            print traceback.format_exc()

        return info
class HttpData:

    def load(self, url):
        try:
            self.auth = Auth()
            self.cookie = self.auth.get_cookies()
            response = xbmcup.net.http.get(url, cookies=self.cookie)
        except xbmcup.net.http.exceptions.RequestException:
            print traceback.format_exc()
            return None
        else:
            if(response.status_code == 200):
                if(self.auth.check_auth(response.text) == False):
                    self.auth.autorize()
                return response.text
            return None

    def post(self, url, data=None):
        if data is None:
            data = {}
        try:
            self.auth = Auth()
            self.cookie = self.auth.get_cookies()
            response = xbmcup.net.http.post(url, data, cookies=self.cookie)
        except xbmcup.net.http.exceptions.RequestException:
            print traceback.format_exc()
            return None
        else:
            if(response.status_code == 200):
                if(self.auth.check_auth(response.text) == False):
                    self.auth.autorize()
                return response.text
            return None


    def ajax(self, url):
        try:
            self.auth = Auth()
            self.cookie = self.auth.get_cookies()
            headers = {
                'X-Requested-With' : 'XMLHttpRequest'
            }
            response = xbmcup.net.http.get(url, cookies=self.cookie, headers=headers)
            print url
        except xbmcup.net.http.exceptions.RequestException:
            print traceback.format_exc()
            return None
        else:
            return response.text if response.status_code == 200 else None

    def get_movies(self, url, page, classname='main_content_item', nocache=False, search=""):
        page = int(page)
        if(page > 0):
            url = SITE_URL+"/"+url.strip('/')+"/page/"+str(page+1)
        else:
            url = SITE_URL+"/"+url.strip('/')
        print url

        if(search != ''):
            html = self.post(url, {'usersearch' : search})
        else:
            html = self.load(url)

        if not html:
            return None, {'page': {'pagenum' : 0, 'maxpage' : 0}, 'data': []}
        result = {'page': {}, 'data': []}
        soup = xbmcup.parser.html(self.strip_scripts(html))
        result['page'] = self.get_page(soup)
        center_menu = soup.find('div', class_=classname)

        try:
            for div in center_menu.find_all('div', class_='item'):
                href = div.find('h2').find('a')
                try:
                    quality = div.find('span', class_='quality_film_title').get_text().strip()
                except:
                    quality = ''

                dop_information = []
                try:
                    year = div.find('div', class_='smoll_year').get_text().strip()
                    dop_information.append(year)
                except:
                    pass

                try:
                    genre = div.find('div', class_='smoll_janr').get_text().strip()
                    dop_information.append(genre)
                except:
                    pass

                information = ''
                if(len(dop_information) > 0):
                    information = '[COLOR white]['+', '.join(dop_information)+'][/COLOR]'

                posters = div.find('div', class_='preview').find_all('img')
                movieposter = None
                for img in posters:
                    img_src = img.get('src')
                    if(img_src.find('/public/') != -1):
                        movieposter = img_src
                        break
                movie_url = href.get('href'),
                movie_id = re.compile('id=([\d]+)', re.S).findall(movie_url[0])[0]

                result['data'].append({
                        'url': movie_url,
                        'id': movie_id,
                        'quality': self.format_quality(quality),
                        'year': information,
                        'name': href.get_text().strip(),
                        'img': None if not movieposter else (SITE_URL + movieposter)
                    })
        except:
            print traceback.format_exc()
        print nocache
        if(nocache):
            return None, result
        else:
            return cache_minutes, result


    def get_movie_info(self, url):
        url = SITE_URL+"/"+url[0]
        html = self.load(url)

        movieInfo = {}
        movieInfo['no_files'] = None
        movieInfo['episodes'] = True
        movieInfo['movies'] = []
        movieInfo['resolutions'] = []

        if not html:
            movieInfo['no_files'] = 'HTTP error'
            return movieInfo

        html = html.encode('utf-8')
        soup = xbmcup.parser.html(self.strip_scripts(html))

        js_string = re.compile("'source' : \$\.parseJSON\('([^\']+)'\)", re.S).findall(html)[0].decode('string_escape').decode('utf-8')
        movies = json.loads(js_string, 'utf-8')
        print movies
        if(movies != None and len(movies) > 0):
            for window_id in movies:
                current_movie = {'folder_title' : '', 'movies': {}}
                try:
                    current_movie['folder_title'] = soup.find('div', {'data-folder': str(window_id)}).find('a').get('title').encode('utf-8')
                except:
                    current_movie['folder_title'] = xbmcup.app.lang[30113]

                sort_movies = sorted(movies[window_id].items(), key=lambda (k,v): int(k))
                for movie in sort_movies:
                    try:
                        current_movie['movies'][movie[0]].append(movie[1])
                    except:
                        current_movie['movies'][movie[0]] = []
                        current_movie['movies'][movie[0]].append(movie[1])

                for resulut in current_movie['movies']:
                    current_movie['movies'][resulut] = current_movie['movies'][resulut][0]
                    # if(len(current_movie['movies'][resulut]) > 1):
                    #     movieInfo['episodes'] = True

                movieInfo['movies'].append(current_movie)

            movieInfo['title'] = soup.find('h1', id='film_object_name').get_text()
            try:
                movieInfo['description'] = soup.find('div', class_='description').get_text().strip()
            except:
                movieInfo['description'] = ''

            try:
                movieInfo['fanart'] = SITE_URL+soup.find('div', class_='screen_bg').find('a').get('href')
            except:
                movieInfo['fanart'] = ''
            try:
                movieInfo['cover'] = SITE_URL+soup.find('img', id='preview_img').get('src')
            except:
                movieInfo['cover'] = ''
            try:
                movieInfo['genres'] = []
                genres = soup.find('div', class_='list_janr').findAll('a')
                for genre in genres:
                    movieInfo['genres'].append(genre.get_text().strip())
                movieInfo['genres'] = ' / '.join(movieInfo['genres']).encode('utf-8')
            except:
                movieInfo['genres'] = ''

            try:
                results = soup.findAll('a', class_='fast_search')
                movieInfo['year'] = self.get_year(results)
            except:
                movieInfo['year'] = ''
            try:
                movieInfo['director'] = soup.find('span', class_='regiser_item').get_text().encode('utf-8')
            except:
                movieInfo['director'] = ''
        else:
            try:
                no_files = soup.find('div', class_='no_files').get_text().strip().encode('utf-8')
            except:
                no_files = ''

            movieInfo['no_files'] = no_files

        return movieInfo

    def get_collections(self):
        url = SITE_URL+"/collection"
        html = self.load(url)
        if not html:
            return None, {'page': {'pagenum' : 0, 'maxpage' : 0}, 'data': []}
        html = html.encode('utf-8')
        result = {'page': {}, 'data': []}
        soup = xbmcup.parser.html(self.strip_scripts(html))
        wrap = soup.find('div', class_='main_content_item')
        try:
            for div in wrap.find_all('div', class_='item'):
                try:
                    preview_img = div.find('div', class_='preview').find('img').get('src')
                except:
                    preview_img = ''

                try:
                    movie_count = div.find('div', class_='item_content').find('span').get_text().strip()
                except:
                    movie_count = ''

                try:
                    href = div.find('div', class_='item_content').find('a')
                    name = href.get_text().strip()+(' (%s)' % movie_count if movie_count != '' else '')
                    href = href.get('href')
                except:
                    name = ''
                    href = ''

                result['data'].append({
                        'url': href,
                        'name': name,
                        'img': None if not preview_img else (SITE_URL + preview_img)
                    })

        except:
            print traceback.format_exc()

        return cache_minutes, result


    def get_bookmarks(self):
        url = "%s/users/profile/bookmark" % SITE_URL

        #self.ajax('%s/users/profile/addbookmark?name=%s' % (SITE_URL, BOOKMARK_DIR))

        html = self.load(url)
        if not html:
            return None, {'page': {'pagenum' : 0, 'maxpage' : 0}, 'data': []}
        html = html.encode('utf-8')
        result = {'page': {}, 'data': []}
        soup = xbmcup.parser.html(self.strip_scripts(html))
        wrap = soup.find('div', id='bookmark_list')

        try:
            for div in wrap.find_all('a'):
                try:
                    href = div.get('rel')
                    name = div.get_text().strip()
                except:
                    name = ''
                    href = ''

                result['data'].append({
                        'url': href,
                        'name': name,
                        'img': cover.treetv
                    })

        except:
            print traceback.format_exc()

        return None, result

    def get_year(self, results):
        for res in results:
            if(res.get('rel')[0] == 'year1'):
                return res.get_text().encode('utf-8')
        return 0

    def strip_scripts(self, html):
        # remove all <script></script> tags and their contents,
        # so the HTML parser does not choke on markup inside inline JS
        return re.compile(r'<script[^>]*>(.*?)</script>', re.S).sub('', html)

    def format_quality(self, quality):
        qualitys = {'HD' : 'ff3BADEE', 'HQ' : 'ff59C641', 'SQ' : 'ffFFB119', 'LQ' : 'ffDE4B64'}
        if(quality in qualitys):
            return "[COLOR %s][%s][/COLOR]" % (qualitys[quality], quality)
        return ("[COLOR ffDE4B64][%s][/COLOR]" % quality if quality != '' else '')


    def get_page(self, soup):
        info = {'pagenum' : 0, 'maxpage' : 0}
        try:
            wrap  = soup.find('div', class_='paginationControl')
            info['pagenum'] = int(wrap.find('b').get_text().encode('utf-8'))
            try:
                info['maxpage'] = int(wrap.find('a', class_='last').get('rel')[0])
            except:
                try:
                    info['maxpage'] = int(os.path.basename(wrap.find('a', class_='next').get('href')))
                except:
                    info['maxpage'] = wrap.find('a', class_='next').get('rel')
        except:
            info['pagenum'] = 1
            info['maxpage'] = 1
            print traceback.format_exc()

        return info
class HttpData:

    def load(self, url):
        try:
            self.auth = Auth()
            self.cookie = self.auth.get_cookies()
            response = xbmcup.net.http.get(url, cookies=self.cookie)
        except xbmcup.net.http.exceptions.RequestException:
            print traceback.format_exc()
            return None
        else:
            if(response.status_code == 200):
                if(self.auth.check_auth(response.text) == False):
                    self.auth.autorize()
                return response.text
            return None

    def post(self, url, data=None):
        if data is None:
            data = {}
        try:
            self.auth = Auth()
            self.cookie = self.auth.get_cookies()
            response = xbmcup.net.http.post(url, data, cookies=self.cookie)
        except xbmcup.net.http.exceptions.RequestException:
            print traceback.format_exc()
            return None
        else:
            if(response.status_code == 200):
                if(self.auth.check_auth(response.text) == False):
                    self.auth.autorize()
                return response.text
            return None


    def ajax(self, url, data=None):
        try:
            self.auth = Auth()
            self.cookie = self.auth.get_cookies()
            headers = {
                'X-Requested-With' : 'XMLHttpRequest'
            }
            if(data):
                response = xbmcup.net.http.post(url, data, cookies=self.cookie, headers=headers)
            else:
                response = xbmcup.net.http.get(url, cookies=self.cookie, headers=headers)
        except xbmcup.net.http.exceptions.RequestException:
            print traceback.format_exc()
            return None
        else:
            return response.text if response.status_code == 200 else None
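    # e.g. (hypothetical): self.ajax(SITE_URL + '/ajax/video/123') issues a
    # GET; self.ajax(url, {'id': 1}) switches to a POST with the same headers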

    def get_movies(self, url, page, idname='results', nocache=False, search="", itemclassname="results-item-wrap"):
        page = int(page)

        if(page > 0 and search == ''):
            url = SITE_URL+"/"+url.strip('/')+"?page="+str(page+1)
        else:
            url = SITE_URL+"/"+url.strip('/')

        # search results and regular listings are loaded the same way here
        html = self.load(url)

        if not html:
            return None, {'page': {'pagenum' : 0, 'maxpage' : 0}, 'data': []}
        result = {'page': {}, 'data': []}
        soup = xbmcup.parser.html(self.strip_scripts(html))
        print soup
        result['page'] = self.get_page(soup)
        if(idname != ''):
            center_menu = soup.find('ul', class_=idname)
        else:
            center_menu = soup

        try:
            for div in center_menu.find_all('li', class_=itemclassname):
                if(itemclassname == 'vlist-item'):
                    href = div.find('a', class_='main-list-link')
                    name = href.find('h3', class_='main-list-title').get_text().strip()

                    try:
                        rating = div.find('span', class_='main-list-rating').get_text().strip()
                    except:
                        rating = 0

                    dop_information = []
                    try:
                        year = div.find('span', class_='main-list-year').get_text().strip()
                        dop_information.append(year)
                    except:
                        pass

                else:
                    href = div.find('a', class_='results-item')

                    dop_information = []
                    try:
                        year = div.find('span', class_='results-item-year').get_text().strip()
                        dop_information.append(year)
                    except:
                        pass

                    try:
                        rating = div.find('span', class_='results-item-rating').find('span').get_text().strip()
                    except:
                        rating = 0

                    name = href.find('div', class_='results-item-title').get_text().strip()

                information = ''
                if(len(dop_information) > 0):
                    information = '[COLOR white]['+', '.join(dop_information)+'][/COLOR]'

                try:
                    movieposter = div.find('meta', attrs={'itemprop' : 'image'}).get('content')
                except:
                    movieposter = None

                movie_url = href.get('href'),
                movie_id = movie_url[0]

                result['data'].append({
                        'url': movie_url[0],
                        'id': movie_id,
                        'rating': self.format_rating(rating),
                        'year': information,
                        'name': name,
                        'img': None if not movieposter else movieposter
                    })
        except:
            print traceback.format_exc()

        if(nocache):
            return None, result
        else:
            return cache_minutes, result

    def get_season_movies(self, url, issoup=False):
        if(issoup == False):
            html = self.load('%s%s' % (SITE_URL, url))
            html = html.encode('utf-8')
            soup = xbmcup.parser.html(self.strip_scripts(html))
        else:
            soup = url

        episodes = soup.find('ul', class_='js-episodes').find_all('li')

        current_movie = {}

        for episode in episodes:
            link = episode.find('a', class_='entity-episode-link')
            video_id = link.get('data-id')
            name = episode.find('div', class_='entity-episode-name').get_text().strip()
            name = re.sub('\s+', ' ', name)

            try:
                current_movie['1080'].append([video_id, name])
            except:
                current_movie['1080'] = []
                current_movie['1080'].append([video_id, name])

            try:
                current_movie['480'].append([video_id, name])
            except:
                current_movie['480'] = []
                current_movie['480'].append([video_id, name])

        if(issoup):
            return current_movie
        else:
            return cache_minutes, current_movie
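    # note: both the '1080' and '480' buckets receive identical
    # [video_id, name] pairs; the quality-specific stream URL is presumably
    # resolved later from the episode's video id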


    def get_movie_info(self, url):
        html = self.load('%s%s' % (SITE_URL, url))
        movieInfo = {}
        movieInfo['no_files'] = None
        movieInfo['episodes'] = True
        movieInfo['movies'] = []
        movieInfo['resolutions'] = []

        if not html:
            movieInfo['no_files'] = 'HTTP error'
            return movieInfo

        html = html.encode('utf-8')
        soup = xbmcup.parser.html(self.strip_scripts(html))

        isSerial = True
        try:
            try:
                seasons = soup.find('div', class_='entity-seasons').find_all('span')
                seasons[0].get_text()
            except:
                seasons = soup.find('div', class_='entity-seasons').find_all('a')
        except:
            isSerial = False

        try:
            video_id = soup.find('div', class_='entity-player').get('data-id')
        except:
            xbmcup.gui.message('Не найден идентификатор видео')  # "Video id not found"
            return

        js_string = self.ajax('%s/ajax/video/%s' % (SITE_URL, video_id))
        movies = json.loads(js_string, 'utf-8')

        if(isSerial):
            for season in seasons:
                season_num = season.get_text().strip()

                s_url = season.get('href')
                if(s_url == None):
                    s_url = url

                current_movie = {
                    'folder_title'  : xbmcup.app.lang[35006]+' '+season_num,
                    'folder_url'    : s_url,
                    'movies'        : self.get_season_movies(soup, True),
                    'isSerial'      : True
                }
                movieInfo['movies'].append(current_movie)

        else:
            current_movie = {
                'folder_title'  : '',
                'folder_url'    : '',
                'movies'        : {},
                'isSerial'      : False
            }

            current_movie['movies']['1080'] = []
            current_movie['movies']['480'] = []

            current_movie['movies']['1080'].append([movies['url']])
            current_movie['movies']['480'].append([movies['lqUrl']])

            movieInfo['movies'].append(current_movie)

        movieInfo['title'] = soup.find('h1', class_='entity-title-text').find('span', class_='js-title').get_text()

        try:
            movieInfo['originaltitle'] = soup.find('h1', class_='entity-title-text').find('meta', attrs={'itemprop' : 'alternativeHeadline'}).get('content')
        except:
            movieInfo['originaltitle'] = ''

        try:
            movieInfo['description'] = soup.find('div', class_='entity-desc-description').get_text().strip()
        except:
            movieInfo['description'] = ''

        try:
            movieInfo['fanart'] = movies['images'][0]
        except:
            movieInfo['fanart'] = ''

        try:
            cover = soup.find('div', class_='entity-desc-poster-img').get('style')
            prog = re.compile('(http://[^\)]+)', re.I)
            result = prog.findall(cover)
            movieInfo['cover'] = result[0]
        except:
            movieInfo['cover'] = ''

        try:
            movieInfo['genres'] = []
            genres = soup.find('dd', class_='js-genres').find_all('a')
            for genre in genres:
                movieInfo['genres'].append(genre.find('span').get_text().strip())
            movieInfo['genres'] = ' '.join(movieInfo['genres']).encode('utf-8')
        except:
            movieInfo['genres'] = ''

        try:
            movieInfo['year'] = soup.find('div', class_='year').find('a').get_text()
        except:
            movieInfo['year'] = ''

        try:
            # duration in minutes, rounded up (60.0 forces float division in
            # Python 2); the misspelled key 'durarion' is kept for compatibility
            movieInfo['durarion'] = int(math.ceil(int(movies['duration'])/60.0))
        except:
            movieInfo['durarion'] = ''

        try:
            movieInfo['ratingValue'] = float(soup.find(attrs={'itemprop' : 'ratingValue'}).get('content'))
        except:
            movieInfo['ratingValue'] = 0

        try:
            movieInfo['ratingCount'] = int(soup.find(attrs={'itemprop' : 'ratingCount'}).get('content'))
        except:
            movieInfo['ratingCount'] = 0

        try:
            movieInfo['director'] = []
            directors = soup.find('dd', class_='js-scenarist').find_all('span')
            for director in directors:
                movieInfo['director'].append(director.find('span').get_text().strip())
            movieInfo['director'] = ', '.join(movieInfo['director']).encode('utf-8')
        except:
            movieInfo['director'] = ''

        return movieInfo


    def strip_scripts(self, html):
        # remove all <script></script> tags and their contents,
        # so the HTML parser does not choke on markup inside inline JS
        return re.compile(r'<script[^>]*>(.*?)</script>', re.S).sub('', html)

    def format_rating(self, rating):
        rating = float(rating)
        if(rating == 0): return ''
        if(rating < 4):
            q = 'ffDE4B64'
        elif(rating < 7):
            q = 'ffFFB119'
        else:
            q = 'ff59C641'
        return "[COLOR %s][%s][/COLOR]" % (q, rating)



    def get_page(self, soup):
        info = {'pagenum' : 0, 'maxpage' : 0}
        try:
            wrap  = soup.find('div', class_='js-pageInfo')
            info['pagenum'] = int(wrap.get('data-current').encode('utf-8'))
            info['maxpage'] = int(wrap.get('data-total'))
        except:
            info['pagenum'] = 1
            info['maxpage'] = 1
            print traceback.format_exc()
        return info
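    # the pager markup is assumed to look roughly like:
    #   <div class="js-pageInfo" data-current="2" data-total="30">...</div>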
class HttpData(FingerPrint):
    currentFingerPrint = None
    noAuthCookie = {}

    def getFingerPrint(self):
        if self.currentFingerPrint == None:
            return self.getFingerprint()
        return self.currentFingerPrint
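    # returns the fingerprint cached by a previous request when available;
    # otherwise computes one via getFingerprint(), which is assumed to come
    # from the FingerPrint base class (note the different capitalization)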

    def load(self, url):
        self.currentFingerPrint = self.getFingerPrint()
        try:
            self.auth = Auth()
            self.cookie = self.auth.get_cookies()

            reqCookie = self.noAuthCookie if self.cookie==None else self.cookie
            headers = {
                'Referer' : url,
                'User-agent' : self.currentFingerPrint['useragent']
            }
            response = xbmcup.net.http.get(url, cookies=reqCookie, headers=headers)
            self.noAuthCookie = response.cookies
        except xbmcup.net.http.exceptions.RequestException:
            print traceback.format_exc()
            return None
        else:
            if(response.status_code == 200):
                if(self.auth.check_auth(response.text) == False):
                    self.auth.autorize()
                return response.text
            return None

    def post(self, url, data=None):
        self.currentFingerPrint = self.getFingerPrint()
        if data is None:
            data = {}
        try:
            self.auth = Auth()
            self.cookie = self.auth.get_cookies()
            reqCookie = self.noAuthCookie if self.cookie==None else self.cookie
            headers = {
                'Referer' : url,
                'User-agent' : self.currentFingerPrint['useragent']
            }
            response = xbmcup.net.http.post(url, data, cookies=reqCookie, headers=headers)
            self.noAuthCookie = response.cookies
        except xbmcup.net.http.exceptions.RequestException:
            print traceback.format_exc()
            return None
        else:
            if(response.status_code == 200):
                if(self.auth.check_auth(response.text) == False):
                    self.auth.autorize()
                return response.text
            return None

    def ajaxpost(self, url, data=None):
        # relies on self.currentFingerPrint having been set by an earlier
        # load()/ajax() call
        if data is None:
            data = {}
        try:
            self.auth = Auth()
            self.cookie = self.auth.get_cookies()

            if(self.cookie != None):
                self.cookie.set('mycook', self.currentFingerPrint['hash'])

            if(self.noAuthCookie != None):
                try:
                    self.noAuthCookie.set('mycook', self.currentFingerPrint['hash'])
                except:
                    pass

            reqCookie = self.noAuthCookie if self.cookie==None else self.cookie
            headers = {
                'Referer' : SITE_URL,
                'X-Requested-With'  : 'XMLHttpRequest',
                'User-agent' : self.currentFingerPrint['useragent']
            }
            response = xbmcup.net.http.post(url, data, cookies=reqCookie, headers=headers)
            #After saving the fingerprint, you do not need to remember cookies
            if(url != SITE_URL+'/film/index/imprint'):
                self.noAuthCookie = response.cookies
        except xbmcup.net.http.exceptions.RequestException:
            print traceback.format_exc()
            return None
        else:
            if(response.status_code == 200):
                return response.text
            return None

    def ajax(self, url):
        self.currentFingerPrint = self.getFingerPrint()
        try:
            self.auth = Auth()
            self.cookie = self.auth.get_cookies()

            if(self.cookie != None):
                self.cookie.set('mycook', self.currentFingerPrint['hash'])

            if(self.noAuthCookie != None):
                try:
                    self.noAuthCookie.set('mycook', self.currentFingerPrint['hash'])
                except:
                    pass

            reqCookie = self.noAuthCookie if self.cookie==None else self.cookie
            headers = {
                'X-Requested-With'  : 'XMLHttpRequest',
                'Referer'           : SITE_URL,
                'User-agent'        : self.currentFingerPrint['useragent']
            }
            response = xbmcup.net.http.get(url, cookies=reqCookie, headers=headers)
            self.noAuthCookie = response.cookies
        except xbmcup.net.http.exceptions.RequestException:
            print traceback.format_exc()
            return None
        else:
            return response.text if response.status_code == 200 else None

    def get_movies(self, url, page, classname='main_content_item', nocache=False, search="", itemclassname="item"):
        page = int(page)
        if(page > 0):
            url = SITE_URL+"/"+url.strip('/')+"/page/"+str(page+1)
        else:
            url = SITE_URL+"/"+url.strip('/')

        if(search != '' and page == 0):
            html = self.post(url, {'usersearch' : search, 'filter' : 'all'})
        else:
            html = self.load(url)

        if not html:
            return None, {'page': {'pagenum' : 0, 'maxpage' : 0}, 'data': []}
        result = {'page': {}, 'data': []}
        soup = xbmcup.parser.html(self.strip_scripts(html))
        #print soup
        result['page'] = self.get_page(soup)
        center_menu = soup.find('div', class_=classname)
        # print center_menu
        try:
            for div in center_menu.find_all('div', class_=itemclassname):
                if(search != ''):
                    href = None
                else:
                    href = div.find('h2').find('a')
                try:
                    quality = div.find('span', class_='quality_film_title').get_text().strip()
                except:
                    quality = ''

                dop_information = []
                try:
                    if(itemclassname == 'item_wrap'):
                        year = div.find('a', class_='fast_search').get_text().strip()
                    else:
                        year = div.find('div', class_='smoll_year').get_text().strip()
                    dop_information.append(year)
                except:
                    pass

                try:
                    if(itemclassname == 'item_wrap'):
                        genre = div.find('span', class_='section_item_list').get_text().strip()
                    else:
                        genre = div.find('div', class_='smoll_janr').get_text().strip()
                    dop_information.append(genre)
                except:
                    pass

                information = ''
                if(len(dop_information) > 0):
                    information = '[COLOR white]['+', '.join(dop_information)+'][/COLOR]'

                posters = div.find('div', class_='preview').find_all('img')

                movieposter = None
                for img in posters:
                    img_src = img.get('src')
                    if(img_src.find('http') != -1):
                        movieposter = img_src
                        if(search != ''):
                            href = img.parent
                        break

                if href is None:
                    # nothing usable was found; bail out to the outer except
                    raise Exception('item link not found')

                # workaround for the bookmarks page
                if(classname == 'book_mark_content'):
                    try:
                        movieposter = SITE_URL+posters[0].get('src')
                    except:
                        pass

                if(search != ''):
                    name = href.find('img').get('alt').strip()
                else:
                    name = href.get_text().strip()

                movie_url = href.get('href'),
                movie_id = re.compile('/film/([\d]+)-', re.S).findall(movie_url[0])[0]


                result['data'].append({
                        'url': movie_url,
                        'id': movie_id,
                        'quality': self.format_quality(quality),
                        'year': information,
                        'name': name,
                        'img': None if not movieposter else movieposter
                    })

            #print result['data']
        except:
            print traceback.format_exc()

        if(nocache):
            return None, result
        else:
            return cache_minutes, result


    def get_movie_info(self, url):

        movieInfo = {}
        movieInfo['no_files'] = None
        movieInfo['episodes'] = True
        movieInfo['movies'] = []
        movieInfo['resolutions'] = []
        movieInfo['page_url'] = url[0]

        url = SITE_URL+url[0]
        html = self.load(url)
        #print url.encode('utf-8')

        if not html:
            movieInfo['no_files'] = 'HTTP error'
            return movieInfo

        html = html.encode('utf-8')
        soup = xbmcup.parser.html(self.strip_scripts(html))

        folders = soup.find('div', id='accordion_wrap').findAll('div', class_='accordion_item')
        #folders = soup.find('div', id='accordion_wrap').findAll('div', class_='folder_name')

        avalible_res = soup.find('div', id='film_object_params').find('span', class_='film_q_img').get_text()

        # propped up with a kludge, long may it hold
        quality_matrix = {
            'HD' : ['360', '480', '720', '1080'],
            'HQ' : ['360', '480', '720'],
            'SQ' : ['360', '480'],
            'LQ' : ['360']
        }

        if(avalible_res == None or avalible_res not in quality_matrix):
            avalible_res = 'HD'

        movies = {}
        for fwrap in folders:
            try:
                folder_id = fwrap.find('div', class_='folder_name').get('data-folder')
                movies[folder_id] = {}
                folder_items = fwrap.findAll('div', class_='film_title_link')
                for q in quality_matrix[avalible_res]:
                    for item in folder_items:
                        movie_data = [item.find('a').get_text().encode('utf-8'), item.find('a').get('data-href')]
                        try:
                            movies[folder_id][q].append(movie_data)
                        except:
                            movies[folder_id][q] = []
                            movies[folder_id][q].append(movie_data)
            except:
                pass


        #print movies

        #js_string = re.compile("'source'\s*:\s*\$\.parseJSON\('([^\']+)'\)", re.S).findall(html)[0].decode('string_escape').decode('utf-8')
        #movies = json.loads(js_string, 'utf-8')
        #print movies
        if(movies != None and len(movies) > 0):
            for window_id in movies:
                current_movie = {'folder_title' : '', 'movies': {}}
                try:
                    current_movie['folder_title'] = soup.find('div', {'data-folder': str(window_id)}).find('a')\
                        .get('title').encode('utf-8')
                except:
                    current_movie['folder_title'] = xbmcup.app.lang[30113]

                sort_movies = sorted(movies[window_id].items(), key=lambda (k,v): int(k))
                for movie in sort_movies:
                    try:
                        current_movie['movies'][movie[0]].append(movie[1])
                    except:
                        current_movie['movies'][movie[0]] = []
                        current_movie['movies'][movie[0]].append(movie[1])

                for resulut in current_movie['movies']:
                    current_movie['movies'][resulut] = current_movie['movies'][resulut][0]
                    # if(len(current_movie['movies'][resulut]) > 1):
                    #     movieInfo['episodes'] = True

                movieInfo['movies'].append(current_movie)

            # movieInfo['movies'] = movies

            movieInfo['title'] = soup.find('h1', id='film_object_name').get_text()
            try:
                movieInfo['description'] = soup.find('div', class_='description').get_text().strip()
            except:
                movieInfo['description'] = ''

            try:
                movieInfo['fanart'] = SITE_URL+soup.find('div', class_='screen_bg').find('a').get('href')
            except:
                movieInfo['fanart'] = ''
            try:
                movieInfo['cover'] = soup.find('img', id='preview_img').get('src')
            except:
                movieInfo['cover'] = ''
            try:
                movieInfo['genres'] = []
                genres = soup.find('div', class_='list_janr').findAll('a')
                for genre in genres:
                    movieInfo['genres'].append(genre.get_text().strip())
                movieInfo['genres'] = ' / '.join(movieInfo['genres']).encode('utf-8')
            except:
                movieInfo['genres'] = ''

            try:
                results = soup.findAll('a', class_='fast_search')
                movieInfo['year'] = self.get_year(results)
            except:
                movieInfo['year'] = ''
            try:
                movieInfo['director'] = soup.find('span', class_='regiser_item').get_text().encode('utf-8')
            except:
                movieInfo['director'] = ''
        else:
            try:
                no_files = soup.find('div', class_='no_files').get_text().strip().encode('utf-8')
            except:
                no_files = 'Что-то пошло не так...'  # "Something went wrong..."

            movieInfo['no_files'] = no_files

        return movieInfo

    def get_collections(self):
        url = SITE_URL+"/collection"
        html = self.load(url)
        if not html:
            return None, {'page': {'pagenum' : 0, 'maxpage' : 10}, 'data': []}
        html = html.encode('utf-8')
        result = {'page': {}, 'data': []}
        soup = xbmcup.parser.html(self.strip_scripts(html))
        wrap = soup.find('div', class_='main_content_item')
        try:
            for div in wrap.find_all('div', class_='item'):
                try:
                    preview_img = div.find('div', class_='preview').find('img').get('src')
                except:
                    preview_img = ''

                try:
                    movie_count = div.find('div', class_='item_content').find('span').get_text().strip()
                except:
                    movie_count = ''

                try:
                    href = div.find('div', class_='item_content').find('a')
                    name = href.get_text().strip()+(' (%s)' % movie_count if movie_count != '' else '')
                    href = href.get('href')
                except:
                    name = ''
                    href = ''

                result['data'].append({
                        'url': href,
                        'name': name,
                        'img': None if not preview_img else (SITE_URL + preview_img)
                    })

        except:
            print traceback.format_exc()

        return cache_minutes, result


    def get_bookmarks(self):
        url = "%s/users/profile/bookmark" % SITE_URL

        #self.ajax('%s/users/profile/addbookmark?name=%s' % (SITE_URL, BOOKMARK_DIR))

        html = self.load(url)
        if not html:
            return None, {'page': {'pagenum' : 0, 'maxpage' : 0}, 'data': []}
        html = html.encode('utf-8')
        result = {'page': {}, 'data': []}
        soup = xbmcup.parser.html(self.strip_scripts(html))
        wrap = soup.find('div', id='bookmark_list')

        try:
            for div in wrap.find_all('a'):
                try:
                    href = div.get('data-rel')
                    name = div.get_text().strip()
                except:
                    name = ''
                    href = ''

                result['data'].append({
                        'url': href,
                        'name': name,
                        'img': cover.treetv
                    })

        except:
            print traceback.format_exc()

        return None, result

    def get_year(self, results):
        for res in results:
            if(res.get('data-rel') == 'year1'):
                return res.get_text().encode('utf-8')
        return 0

    def strip_scripts(self, html):
        # remove all <script></script> tags together with their contents,
        # so the HTML parser does not choke on markup inside inline JS

        # normalize a double quote squeezed between alphanumerics: a"b -> a'b
        html = re.compile(r'([a-zA-Z0-9]{1,1})"([a-zA-Z0-9]{1,1})').sub("\\1'\\2", html)
        html = re.compile(r'<script[^>]*>(.*?)</script>', re.S).sub('', html)
        html = re.compile(r'</script>', re.S).sub('', html)
        # drop stray alt="/title=" fragments that break attribute parsing
        html = re.compile(r'alt="(>+|src=")', re.S).sub('\\1', html)
        html = re.compile(r'title="(>+|src=")', re.S).sub('\\1', html)
        #print html.encode('utf-8')
        return html

    def format_quality(self, quality):
        qualitys = {'HD' : 'ff3BADEE', 'HQ' : 'ff59C641', 'SQ' : 'ffFFB119', 'LQ' : 'ffDE4B64'}
        if(quality in qualitys):
            return "[COLOR %s][%s][/COLOR]" % (qualitys[quality], quality)
        return ("[COLOR ffDE4B64][%s][/COLOR]" % quality if quality != '' else '')


    def get_page(self, soup):
        info = {'pagenum' : 0, 'maxpage' : 0}
        try:
            try:
                wrap  = soup.find('div', id='main_paginator')
                wrap.find('b')
            except:
                wrap  = soup.find('div', class_='paginationControl')

            info['pagenum'] = int(wrap.find('a', class_="active").get_text().encode('utf-8'))
            try:
                info['maxpage'] = int(wrap.find('a', class_='last').get('data-rel'))
            except:
                try:
                    try:
                        info['maxpage'] = int(os.path.basename(wrap.find('a', class_='next').get('href')))
                    except:
                        info['maxpage'] = wrap.find('a', class_='next').get('data-rel')
                except:
                    info['maxpage'] = info['pagenum']
        except:
            info['pagenum'] = 1
            info['maxpage'] = 1
            print traceback.format_exc()

        return info