Example 1
 def load_torrent_json_data(self):
     """Load torrent data from a JSON call"""
     LOGGER.debug("Loading Torrent data from torrent JSON page.")
     if 'GroupId' not in self.data or not self.data['GroupId']:
         movie_url = session.base_get('torrents.php', params={'torrentid': self.ID}).url
         self.data['GroupId'] = re.search(r'\?id=(\d+)', movie_url).group(1)
     self.data.update(session.base_get('torrents.php',
                                       params={'action': 'description',
                                               'id': self.data['GroupId'],
                                               'torrentid': self.ID}).json())
Example 2
 def load_torrent_json_data(self):
     """Load torrent data from a JSON call"""
     LOGGER.debug("Loading Torrent data from torrent JSON page.")
     if "GroupId" not in self.data or not self.data["GroupId"]:
         movie_url = session.base_get("torrents.php", params={"torrentid": self.ID}).url
         self.data["GroupId"] = re.search(r"\?id=(\d+)", movie_url).group(1)
     self.data.update(
         session.base_get(
             "torrents.php", params={"action": "description", "id": self.data["GroupId"], "torrentid": self.ID}
         ).json()
     )
Example 3
File: api.py Project: mza921/PTPAPI
 def __init__(self, username=None, password=None, passkey=None):
     j = None
     self.cookies_file = os.path.expanduser(
         config.get('Main', 'cookiesFile'))
     LOGGER.info("Initiating login sequence.")
     password = (password or config.get('PTP', 'password'))
     username = (username or config.get('PTP', 'username'))
     passkey = (passkey or config.get('PTP', 'passkey'))
     if os.path.isfile(self.cookies_file):
         self.__load_cookies()
         # A really crude test to see if we're logged in
         session.max_redirects = 1
         try:
             req = session.base_get('torrents.php')
         except requests.exceptions.TooManyRedirects:
             if os.path.isfile(self.cookies_file):
                 os.remove(self.cookies_file)
             session.cookies = requests.cookies.RequestsCookieJar()
         session.max_redirects = 3
     if not os.path.isfile(self.cookies_file):
         if not password or not passkey or not username:
             raise PTPAPIException("Not enough info provided to log in.")
         try:
             req = session.base_post('ajax.php?action=login',
                                     data={
                                         "username": username,
                                         "password": password,
                                         "passkey": passkey
                                     })
             j = req.json()
         except ValueError:
             if req.status_code == 200:
                 raise PTPAPIException(
                     "Could not parse returned json data.")
             else:
                 if req.status_code == 429:
                     LOGGER.critical(req.text.strip())
                 req.raise_for_status()
         if j["Result"] != "Ok":
             raise PTPAPIException(
                 "Failed to log in. Please check the username, password and passkey. Response: %s"
                 % j)
         self.__save_cookie()
         # Get some information that will be useful for later
         req = session.base_get('index.php')
     Util.raise_for_cloudflare(req.text)
     LOGGER.info("Login successful.")
     self.current_user_id = re.search(r'user.php\?id=(\d+)',
                                      req.text).group(1)
     self.auth_key = re.search(r'auth=([0-9a-f]{32})', req.text).group(1)
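The constructor above first tries a saved cookies file and only falls back to the ajax.php login endpoint when that fails, raising PTPAPIException on bad credentials. A minimal usage sketch, assuming the class is named API as in the project's api.py and that the exception is importable from the package (both import paths are assumptions, not confirmed by these excerpts):

 # Hedged usage sketch for the login flow above; assumes the PTPAPI package
 # is installed and lays out its modules as guessed here.
 from ptpapi.api import API  # import path assumed
 from ptpapi.error import PTPAPIException  # import path assumed

 try:
     # All three arguments are optional; they fall back to the [PTP]
     # section of the config file, as shown in the constructor.
     api = API(username="user", password="secret", passkey="0123abcd")
     print("Logged in as user id", api.current_user_id)
 except PTPAPIException as exc:
     print("Login failed:", exc)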
Example 4
 def load_html_data(self):
     """Scrape all data from a movie's HTML page"""
     soup = bs4(session.base_get("torrents.php", params={'id': self.ID}).text, "html.parser")
     self.data['Cover'] = soup.find('img', class_='sidebar-cover-image')['src']
     # Title and Year
     # decode_contents() returns text; encode_contents() would return bytes and break the str regex
     match = re.match(r'(.*) \[(\d{4})\]', soup.find('h2', class_='page__title').decode_contents())
     self.data['Title'] = match.group(1)
     self.data['Year'] = match.group(2)
     # Genre tags
     self.data['Tags'] = []
     for tagbox in soup.find_all('div', class_="box_tags"):
         for tag in tagbox.find_all("li"):
             self.data['Tags'].append(tag.find('a').string)
     self.data['Directors'] = []
     for director in soup.find('h2', class_='page__title').find_all('a', class_='artist-info-link'):
         self.data['Directors'].append({'Name': director.string})
     # File list & trumpability
     for tor in self['Torrents']:
         # Get file list
         filediv = soup.find("div", id="files_%s" % tor.ID)
         tor.data['Filelist'] = {}
         basepath = re.match(r'\/(.*)\/', filediv.find("thead").find_all("div")[1].get_text()).group(1)
         for elem in filediv.find("tbody").find_all("tr"):
             bytesize = elem("td")[1]("span")[0]['title'].replace(",", "").replace(' bytes', '')
             filepath = os.path.join(basepath, elem("td")[0].string)
             tor.data['Filelist'][filepath] = bytesize
         # Check if trumpable
         if soup.find(id="trumpable_%s" % tor.ID):
             tor.data['Trumpable'] = [s.get_text() for s in soup.find(id="trumpable_%s" % tor.ID).find_all('span')]
         else:
             tor.data['Trumpable'] = []
Example 5
 def load_movie_json_data(self):
     """Load data from the movie page"""
     LOGGER.debug("Loading Torrent data from movie JSON page.")
     if "GroupId" not in self.data or not self.data["GroupId"]:
         movie_url = session.base_get("torrents.php", params={"torrentid": self.ID}).url
         self.data["GroupId"] = re.search(r"\?id=(\d+)", movie_url).group(1)
     movie_data = session.base_get(
         "torrents.php", params={"torrentid": self.ID, "id": self.data["GroupId"], "json": "1"}
     ).json()
     for tor in movie_data["Torrents"]:
         if int(tor["Id"]) == int(self.ID):
             # Fill in any optional fields
             for key in ["RemasterTitle"]:
                 self.data[key] = ""
             self.data.update(tor)
             break
Example 6
 def load_movie_html_data(self):
     """Get data from the parent movie's JSON data"""
     if 'GroupId' not in self.data or not self.data['GroupId']:
         movie_url = session.base_get('torrents.php', params={'torrentid': self.ID}).url
         self.data['GroupId'] = parse_qs(urlparse(movie_url).query)['id'][0]
     soup = bs4(session.base_get('torrents.php', params={'id': self.data['GroupId']}).content, "html.parser")
     filediv = soup.find("div", id="files_%s" % self.ID)
     self.data['Filelist'] = {}
     for elem in filediv.find("tbody").find_all("tr"):
         bytesize = elem("td")[1]("span")[0]['title'].replace(",", "").replace(' bytes', '')
         self.data['Filelist'][elem("td")[0].string] = bytesize
     # Check if trumpable
     if soup.find("trumpable_%s" % self.ID):
         self.data['Trumpable'] = [s.get_text() for s in soup.find("trumpable_%s" % self.ID).find('span')]
     else:
         self.data['Trumpable'] = []
Example 7
 def hnr_zip(self):
     """Download the zip file of all HnRs"""
     zip_file = session.base_get("snatchlist.php", params={"action": "hnrzip"})
     if zip_file.headers["Content-Type"] == "application/zip":
         return zip_file
     else:
         return None
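hnr_zip returns the raw HTTP response when the server actually sent a zip archive, and None otherwise. A short standard-library sketch for unpacking that return value (the helper name is ours; it relies only on the .content attribute used throughout these excerpts):

 import io
 import zipfile

 def list_hnr_files(zip_response):
     """Return the member names of an HnR zip response, or [] if there was none."""
     if zip_response is None:
         return []
     # The response body is the archive itself, so wrap the bytes in a
     # file-like object for the zipfile module.
     with zipfile.ZipFile(io.BytesIO(zip_response.content)) as archive:
         return archive.namelist()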
Example 8
    def bookmarks(self, search_terms=None):
        """Fetch a list of movies the user has bookmarked

        :rtype: array of Movies"""
        search_terms = search_terms or {}
        search_terms.update({"userid": self.ID})
        req = session.base_get("bookmarks.php", params=search_terms)
        movies = []
        for movie in api.Util.snarf_cover_view_data(req.text):
            movie["Title"] = HTMLParser.HTMLParser().unescape(movie["Title"])
            movie["Torrents"] = []
            for group in movie["GroupingQualities"]:
                for torrent in group["Torrents"]:
                    torrent_re = (
                        r'&#(\d*);.*title="(.*?)">(.*?) / (.*?) / (.*?) / (.*?)[ <]'
                    )  # pylint: disable=line-too-long
                    match = re.search(torrent_re, torrent["Title"])
                    torrent["GoldenPopcorn"] = (
                        match.group(1) == "10047"
                    )  # 10047 = Unicode GP symbol pylint: disable=line-too-long
                    torrent["ReleaseName"] = HTMLParser.HTMLParser().unescape(match.group(2))
                    torrent["Codec"] = match.group(3)
                    torrent["Container"] = match.group(4)
                    torrent["Source"] = match.group(5)
                    torrent["Resolution"] = match.group(6)
                    movie["Torrents"].append(torrent)
            movies.append(Movie(data=movie))
        return movies
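The torrent_re pattern above packs six captures into one pass over each torrent's Title fragment. A self-contained demonstration against a made-up fragment (the sample string is purely illustrative, not real site output):

 import re

 torrent_re = r'&#(\d*);.*title="(.*?)">(.*?) / (.*?) / (.*?) / (.*?)[ <]'
 # Illustrative HTML fragment in the shape the regex expects.
 sample = '&#10047;<a title="Some.Movie.1999.1080p.BluRay.x264-GRP">x264 / MKV / Blu-ray / 1080p </a>'
 match = re.search(torrent_re, sample)
 print(match.group(1) == "10047")  # True; 10047 is the golden popcorn marker
 print(match.groups()[1:])         # release name, codec, container, source, resolution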
Example 9
 def hnr_zip(self):
     """Download the zip file of all HnRs"""
     zip_file = session.base_get('snatchlist.php', params={'action': 'hnrzip'})
     if zip_file.headers['Content-Type'] == 'application/zip':
         return zip_file
     else:
         return None
Example 10
 def load_movie_html_data(self):
     """Get data from the parent movie's JSON data"""
     if "GroupId" not in self.data or not self.data["GroupId"]:
         movie_url = session.base_get("torrents.php", params={"torrentid": self.ID}).url
         self.data["GroupId"] = parse_qs(urlparse(movie_url).query)["id"][0]
     soup = bs4(session.base_get("torrents.php", params={"id": self.data["GroupId"]}).content, "html.parser")
     filediv = soup.find("div", id="files_%s" % self.ID)
     self.data["Filelist"] = {}
     for elem in filediv.find("tbody").find_all("tr"):
         bytesize = elem("td")[1]("span")[0]["title"].replace(",", "").replace(" bytes", "")
         self.data["Filelist"][elem("td")[0].string] = bytesize
     # Check if trumpable
     if soup.find("trumpable_%s" % self.ID):
         self.data["Trumpable"] = [s.get_text() for s in soup.find("trumpable_%s" % self.ID).find("span")]
     else:
         self.data["Trumpable"] = []
Example 11
 def load_html_data(self):
     """Scrape all data from a movie's HTML page"""
     soup = bs4(session.base_get("torrents.php", params={'id': self.ID}).text, "html.parser")
     self.data['Cover'] = soup.find('img', class_='sidebar-cover-image')['src']
     # Title and Year
     # decode_contents() returns text; encode_contents() would return bytes and break the str regex
     match = re.match(r'(.*) \[(\d{4})\]', soup.find('h2', class_='page__title').decode_contents())
     self.data['Title'] = match.group(1)
     self.data['Year'] = match.group(2)
     # Genre tags
     self.data['Tags'] = []
     for tagbox in soup.find_all('div', class_="box_tags"):
         for tag in tagbox.find_all("li"):
             self.data['Tags'].append(tag.find('a').string)
     self.data['Directors'] = []
     for director in soup.find('h2', class_='page__title').find_all('a', class_='artist-info-link'):
         self.data['Directors'].append({'Name': director.string.strip()})
     # File list & trumpability
     for tor in self['Torrents']:
         # Get file list
         filediv = soup.find("div", id="files_%s" % tor.ID)
         tor.data['Filelist'] = {}
         basepath = re.match(r'\/(.*)\/', filediv.find("thead").find_all("div")[1].get_text()).group(1)
         for elem in filediv.find("tbody").find_all("tr"):
             bytesize = elem("td")[1]("span")[0]['title'].replace(",", "").replace(' bytes', '')
             filepath = os.path.join(basepath, elem("td")[0].string)
             tor.data['Filelist'][filepath] = bytesize
         # Check if trumpable
         if soup.find(id="trumpable_%s" % tor.ID):
             tor.data['Trumpable'] = [s.get_text() for s in soup.find(id="trumpable_%s" % tor.ID).find_all('span')]
         else:
             tor.data['Trumpable'] = []
Example 12
    def __init__(self, username=None, password=None, passkey=None):
        json = None
        self.cookies_file = os.path.expanduser(config.get('Main', 'cookiesFile'))
        
        LOGGER.info("Initiating login sequence.")
        password = (password or config.get('PTP', 'password'))
        username = (username or config.get('PTP', 'username'))
        passkey  = (passkey or config.get('PTP', 'passkey'))

        if os.path.isfile(self.cookies_file):                      # If a cookies file exists (a crude test of whether we're logged in)
            self.__loadCookies()
            session.max_redirects = 1
            try:
                req = session.base_get('torrents.php')
            except requests.exceptions.TooManyRedirects:
                if os.path.isfile(self.cookies_file):
                    os.remove(self.cookies_file)                    # Delete the cookies
                session.cookies = requests.cookies.RequestsCookieJar()
            session.max_redirects = 3
        if not os.path.isfile(self.cookies_file):
            if not password or not passkey or not username:
                print("Not enough info provided to login! Exiting...")
                exit(1)
            try:
                req = session.base_post('ajax.php?action=login',
                                        data = {'username': username,
                                                'password': password,
                                                'passkey' : passkey})
                json = req.json()
            except ValueError:
                if req.status_code == 200:
                    print("Could not parse returned JSON data. Exiting...")
                    exit(1)
                else:
                    if req.status_code == 429:
                        LOGGER.critical(req.text.strip())
                    req.raise_for_status()

            if json['Result'] != 'Ok':
                print("Failed to login. Please check the username, password, and passkey. Response: {0}".format(json))
                exit(1)  # bail out instead of saving cookies for a failed login

            self.__saveCookies()
            req = session.base_get('index.php')

        print("Login successful!")
        self.current_user_id = re.search(r'user.php\?id=(\d+)', req.text).group(1)  # regex to capture user id that is all digits
        self.auth_key = re.search(r'auth=([0-9a-f]{32})', req.text).group(1)        # regex to capture hex auth key that is 32 digits long
Example 13
 def log(self):
     """Gets the PTP log"""
     soup = bs4(session.base_get('/log.php').content, "html.parser")
     ret_array = []
     for message in soup.find('table').find('tbody').find_all('tr'):
         ret_array.append((message.find('span', class_='time')['title'],
                           message.find('span', class_='log__message').get_text().lstrip().encode('UTF-8')))
     return ret_array
Example 14
 def contest_leaders(self):
     """Get data on who's winning"""
     LOGGER.debug("Fetching contest leaderboard")
     soup = bs4(session.base_get("contestleaders.php").content, "html.parser")
     ret_array = []
     for cell in soup.find('table', class_='table--panel-like').find('tbody').find_all('tr'):
         ret_array.append((cell.find_all('td')[1].get_text(), cell.find_all('td')[2].get_text()))
     return ret_array
Example 15
 def load_movie_json_data(self):
     """Load data from the movie page"""
     LOGGER.debug("Loading Torrent data from movie JSON page.")
     if 'GroupId' not in self.data or not self.data['GroupId']:
         movie_url = session.base_get('torrents.php', params={'torrentid': self.ID}).url
         self.data['GroupId'] = re.search(r'\?id=(\d+)', movie_url).group(1)
     movie_data = session.base_get('torrents.php',
                                   params={'torrentid': self.ID,
                                           'id': self.data['GroupId'],
                                           'json': '1'}).json()
     for tor in movie_data['Torrents']:
         if int(tor['Id']) == int(self.ID):
             # Fill in any optional fields
             for key in ['RemasterTitle']:
                 if key not in self.data:
                     self.data[key] = ''
             self.data.update(tor)
             break
Example 16
 def download_to_file(self, dest=None, name=None):
     """Convience method to download directly to a file"""
     req = session.base_get("torrents.php", params={"action": "download", "id": self.ID})
     if not dest:
         dest = config.get("Main", "downloadDirectory")
     if not name:
         name = re.search(r'filename="(.*)"', req.headers["Content-Disposition"]).group(1)
     with open(os.path.join(dest, name), "wb") as fileh:
         fileh.write(req.content)
     return os.path.join(dest, name)
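download_to_file derives the file name from the Content-Disposition header with a bare re.search, which raises AttributeError if the header is missing or shaped differently. A hedged standalone variant with a fallback (the function name and the default are ours):

 import re

 def filename_from_disposition(headers, default="download.torrent"):
     """Extract filename="..." from a Content-Disposition header, with a fallback."""
     match = re.search(r'filename="(.*)"', headers.get("Content-Disposition", ""))
     return match.group(1) if match else default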
Example 17
 def __init__(self, username=None, password=None, passkey=None):
     j = None
     self.cookies_file = os.path.expanduser(config.get('Main', 'cookiesFile'))
     LOGGER.info("Initiating login sequence.")
     password = (password or config.get('PTP', 'password'))
     username = (username or config.get('PTP', 'username'))
     passkey = (passkey or config.get('PTP', 'passkey'))
     if os.path.isfile(self.cookies_file):
         self.__load_cookies()
         # A really crude test to see if we're logged in
         session.max_redirects = 1
         try:
             req = session.base_get('torrents.php')
         except requests.exceptions.TooManyRedirects:
             if os.path.isfile(self.cookies_file):
                 os.remove(self.cookies_file)
             session.cookies = requests.cookies.RequestsCookieJar()
         session.max_redirects = 3
     if not os.path.isfile(self.cookies_file):
         if not password or not passkey or not username:
             raise PTPAPIException("Not enough info provided to log in.")
         try:
             req = session.base_post('ajax.php?action=login',
                                     data={"username": username,
                                           "password": password,
                                           "passkey": passkey})
             j = req.json()
         except ValueError:
             if req.status_code == 200:
                 raise PTPAPIException("Could not parse returned json data.")
             else:
                 if req.status_code == 429:
                     LOGGER.critical(req.text.strip())
                 req.raise_for_status()
         if j["Result"] != "Ok":
             raise PTPAPIException("Failed to log in. Please check the username, password and passkey. Response: %s" % j)
         self.__save_cookie()
         # Get some information that will be useful for later
         req = session.base_get('index.php')
     Util.raise_for_cloudflare(req.text)
     LOGGER.info("Login successful.")
     self.current_user_id = re.search(r'user.php\?id=(\d+)', req.text).group(1)
     self.auth_key = re.search(r'auth=([0-9a-f]{32})', req.text).group(1)
Example 18
 def load_json_data(self):
     """Load movie JSON data"""
     self.data.update(session.base_get("torrents.php",
                                       params={'id': self.ID,
                                               'json': '1'}).json())
     if 'ImdbId' not in self.data:
         self.data['ImdbId'] = ''
     if 'Directors' not in self.data:
         self.data['Directors'] = []
     self.conv_json_torrents()
Example 19
File: api.py Project: mza921/PTPAPI
 def need_for_seed(self, filters=None):
     """List torrents that need seeding"""
     filters = filters or {}  # avoid a shared mutable default argument
     data = Util.snarf_cover_view_data(
         session.base_get("needforseed.php", params=filters).content)
     torrents = []
     for m in data:
         torrent = m['GroupingQualities'][0]['Torrents'][0]
         torrent['Link'] = config.get('Main', 'baseURL') + bs4(
             torrent['Title'], 'lxml').find('a')['href']
         torrents.append(torrent)
     return torrents
Example 20
File: api.py Project: mza921/PTPAPI
 def collage(self, coll_id, search_terms=None):
     """Simplistic representation of a collage, might be split out later"""
     search_terms = search_terms or {}  # avoid a shared mutable default argument
     search_terms['id'] = coll_id
     req = session.base_get('collages.php', params=search_terms)
     movies = []
     for movie in Util.snarf_cover_view_data(req.text):
         movie['Torrents'] = []
         for group in movie['GroupingQualities']:
             movie['Torrents'].extend(group['Torrents'])
         movies.append(Movie(data=movie))
     return movies
Example 21
 def collage(self, coll_id, search_terms):
     """Simplistic representation of a collage, might be split out later"""
     search_terms['id'] = coll_id
     req = session.base_get('collages.php', params=search_terms)
     movies = []
     for movie in Util.snarf_cover_view_data(req.text):
         movie['Torrents'] = []
         for group in movie['GroupingQualities']:
             movie['Torrents'].extend(group['Torrents'])
         movies.append(Movie(data=movie))
     return movies
Example 22
    def ratings(self):
        """Fetch a list of rated movies

        :rtype: array of tuples with a Movie and a rating out of 100"""
        soup = bs4(session.base_get("user.php", params={"id": self.ID, "action": "ratings"}).text, "html.parser")
        ratings = []
        for row in soup.find(id="ratings_table").tbody.find_all("tr"):
            movie_id = re.search(r"id=(\d+)", row.find(class_="l_movie")["href"]).group(1)
            rating = row.find(id="user_rating_%s" % movie_id).text.rstrip("%")
            ratings.append((movie_id, rating))
        return ratings
Example 23
    def bookmarks(self, search_terms=None):
        """Fetch a list of movies the user has bookmarked

        :rtype: array of Movies"""
        search_terms = search_terms or {}
        search_terms.update({'userid': self.ID})
        req = session.base_get('bookmarks.php', params=search_terms)
        movies = []
        for movie in api.Util.snarf_cover_view_data(req.text):
            movies.append(Movie(data=movie))
        return movies
Example 24
 def download_to_dir(self, dest=None):
     """Convenience method to download directly to a directory"""
     req = session.base_get("torrents.php",
                            params={'action': 'download',
                                    'id': self.ID})
     if not dest:
         dest = config.get('Main', 'downloadDirectory')
     name = re.search(r'filename="(.*)"', req.headers['Content-Disposition']).group(1)
     dest = os.path.join(dest, name)
     with open(dest, 'wb') as fileh:
         fileh.write(req.content)
     return dest
Example 25
File: api.py Project: mza921/PTPAPI
 def contest_leaders(self):
     """Get data on who's winning"""
     LOGGER.debug("Fetching contest leaderboard")
     soup = bs4(
         session.base_get("contestleaders.php").content, "html.parser")
     ret_array = []
     for cell in soup.find(
             'table',
             class_='table--panel-like').find('tbody').find_all('tr'):
         ret_array.append((cell.find_all('td')[1].get_text(),
                           cell.find_all('td')[2].get_text()))
     return ret_array
Example 26
File: api.py Project: mza921/PTPAPI
 def log(self):
     """Gets the PTP log"""
     soup = bs4(session.base_get('log.php').content, "html.parser")
     ret_array = []
     for message in soup.find('table').find('tbody').find_all('tr'):
         ret_array.append(
             (message.find('span', class_='time')['title'],
              message.find(
                  'span',
                  class_='log__message').get_text().lstrip().encode('UTF-8')
              ))
     return ret_array
Example 27
    def inbox(self, page=1):
        """Fetch a list of messages from the user's inbox
        Incidentally update the number of messages"""
        soup = bs4(session.base_get('inbox.php', params={'page': page}).text, "html.parser")

        self.new_messages = self.__parse_new_messages(soup)

        for row in soup.find(id="messageformtable").tbody.find_all('tr'):
            yield {'Subject': row.find_all('td')[1].text.encode('UTF-8').strip(),
                   'Sender': row.find_all('td')[2].text,
                   'Date': row.find_all('td')[3].span['title'],
                   'ID': re.search(r'id=(\d+)', row.find_all('td')[1].a['href']).group(1),
                   'Unread': 'inbox-message--unread' in row['class']}
Example 28
 def search(self, filters):
     """Perform a movie search"""
     if 'name' in filters:
         filters['searchstr'] = filters['name']
     filters['json'] = 'noredirect'
     ret_array = []
     for movie in session.base_get('torrents.php', params=filters).json()['Movies']:
         if 'Directors' not in movie:
             movie['Directors'] = []
         if 'ImdbId' not in movie:
             movie['ImdbId'] = '0'
         movie['Title'] = HTMLParser.HTMLParser().unescape(movie['Title'])
         ret_array.append(Movie(data=movie))
     return ret_array
Example 29
 def inbox_conv(self, conv_id):
     """Get a specific conversation from the inbox"""
     soup = bs4(session.base_get("inbox.php", params={"action": "viewconv", "id": conv_id}).text, "html.parser")
     messages = []
     for msg in soup.find_all("div", id=re.compile("^message"), class_="forum-post"):
         message = {}
         message["Text"] = msg.find("div", class_="forum-post__body").text.strip()
         username = msg.find("strong").find("a", class_="username")
         if username is None:
             message["User"] = "******"
         else:
             message["User"] = username.text.strip()
         message["Time"] = msg.find("span", class_="time").text.strip()
         messages.append(message)
     return {"Subject": soup.find("h2", class_="page__title").text, "Message": messages}
Example 30
    def inbox(self, page=1):
        """Fetch a list of messages from the user's inbox
        Incidentally update the number of messages"""
        soup = bs4(session.base_get("inbox.php", params={"page": page}).text, "html.parser")

        self.new_messages = self.__parse_new_messages(soup)

        for row in soup.find(id="messageformtable").tbody.find_all("tr"):
            yield {
                "Subject": row.find_all("td")[1].text.encode("UTF-8").strip(),
                "Sender": row.find_all("td")[2].text,
                "Date": row.find_all("td")[3].span["title"],
                "ID": re.search(r"id=(\d+)", row.find_all("td")[1].a["href"]).group(1),
                "Unread": True if "inbox-message--unread" in row["class"] else False,
            }
Example 31
    def ratings(self):
        """Fetch a list of rated movies

        :rtype: array of tuples with a Movie and a rating out of 100"""
        soup = bs4(session.base_get(
            'user.php',
            params={'id': self.ID, 'action': 'ratings'}
        ).text, "html.parser")
        ratings = []
        for row in soup.find(id='ratings_table').tbody.find_all('tr'):
            movie_id = re.search(r'id=(\d+)',
                                 row.find(class_='l_movie')['href']).group(1)
            rating = row.find(id='user_rating_%s' % movie_id).text.rstrip('%')
            ratings.append((movie_id, rating))
        return ratings
Example 32
File: api.py Project: mza921/PTPAPI
 def search(self, filters):
     """Perform a movie search"""
     if 'name' in filters:
         filters['searchstr'] = filters['name']
     filters['json'] = 'noredirect'
     ret_array = []
     for movie in session.base_get('torrents.php',
                                   params=filters).json()['Movies']:
         if 'Directors' not in movie:
             movie['Directors'] = []
         if 'ImdbId' not in movie:
             movie['ImdbId'] = '0'
         movie['Title'] = HTMLParser.HTMLParser().unescape(movie['Title'])
         ret_array.append(Movie(data=movie))
     return ret_array
Example 33
    def download(self):
        """ Download the torrent file to disk """
        req = session.base_get("torrents.php",
                               params={
                                   'action': 'download',
                                   'id': self.ID
                               })
        dest = config.get('Main', 'downloadDirectory')
        filename = re.search(r'filename="(.*)"',
                             req.headers['Content-Disposition']).group(
                                 1)  # Get the filename (something.torrent)
        dest = os.path.join(dest, filename)
        with open(dest, 'wb') as fileh:
            fileh.write(req.content)
        print("Downloaded {0} to {1}".format(filename, dest))

        return True
Example 34
 def inbox_conv(self, conv_id):
     """Get a specific conversation from the inbox"""
     soup = bs4(session.base_get('inbox.php', params={'action': 'viewconv', 'id': conv_id}).text, "html.parser")
     messages = []
     for msg in soup.find_all('div', id=re.compile('^message'), class_="forum-post"):
         message = {}
         message['Text'] = msg.find('div', class_="forum-post__body").text.strip()
         username = msg.find('strong').find('a', class_="username")
         if username is None:
             message['User'] = '******'
         else:
             message['User'] = username.text.strip()
         message['Time'] = msg.find('span', class_="time").text.strip()
         messages.append(message)
     return {
         'Subject': soup.find('h2', class_="page__title").text,
         'Message': messages
     }
Example 35
 def download(self):
     """Download the torrent contents"""
     req = session.base_get("torrents.php", params={"action": "download", "id": self.ID})
     return req.content
Example 36
File: api.py Project: mza921/PTPAPI
 def logout(self):
     """Forces a logout."""
     os.remove(self.cookies_file)
     return session.base_get('logout.php', params={'auth': self.auth_key})
Example 37
 def get_new_messages(self):
     """Update the number of messages"""
     soup = bs4(session.base_get('inbox.php').text, "html.parser")
     self.new_messages = self.__parse_new_messages(soup)
     return self.new_messages
Example 38
 def load_json_data(self):
     """Load movie JSON data"""
     self.data.update(session.base_get("torrents.php",
                                       params={'id': self.ID,
                                               'json': '1'}).json())
     self.conv_json_torrents()
Example 39
    def load_html_data(self):
        """Scrape all data from a movie's HTML page"""
        soup = bs4(
            session.base_get("torrents.php", params={
                'id': self.ID
            }).text, "html.parser")
        self.data['Cover'] = soup.find('img',
                                       class_='sidebar-cover-image')['src']
        # Title and Year
        match = re.match(
            r'(.*) \[(\d{4})\]',
            # decode_contents() returns text; encode_contents() would return
            # bytes and break the str regex under Python 3
            soup.find('h2', class_='page__title').decode_contents())
        self.data['Title'] = match.group(1)
        self.data['Year'] = match.group(2)
        # Genre tags
        self.data['Tags'] = []
        for tagbox in soup.find_all('div', class_="box_tags"):
            for tag in tagbox.find_all("li"):
                self.data['Tags'].append(tag.find('a').string)
        # Directors
        self.data['Directors'] = []
        for director in soup.find('h2', class_='page__title').find_all(
                'a', class_='artist-info-link'):
            self.data['Directors'].append({'Name': director.string.strip()})
        # Ratings
        rating = soup.find(id='ptp_rating_td')
        self.data['PtpRating'] = rating.find(id='user_rating').text.strip('%')
        self.data['PtpRatingCount'] = re.sub(r"\D", "",
                                             rating.find(id='user_total').text)
        your_rating = rating.find(id='ptp_your_rating').text
        if '?' in your_rating:
            self.data['PtpYourRating'] = None
            self.data['Seen'] = False
        elif re.sub("\D", "", your_rating) == '':
            self.data['PtpYourRating'] = None
            self.data['Seen'] = True
        else:
            self.data['PtpYourRating'] = re.sub(r"\D", "", your_rating)
            self.data['Seen'] = True
        # Have we snatched this
        self.data['Snatched'] = False
        if soup.find(class_='torrent-info-link--user-snatched') or soup.find(
                class_='torrent-info-link--user-seeding'):
            self.data['Snatched'] = True

        # File list & trumpability for torrents
        for tor in self['Torrents']:
            # Get file list
            filediv = soup.find("div", id="files_%s" % tor.ID)
            tor.data['Filelist'] = {}
            basepath = re.match(
                r'\/(.*)\/',
                filediv.find("thead").find_all("div")[1].get_text()).group(1)
            for elem in filediv.find("tbody").find_all("tr"):
                bytesize = elem("td")[1]("span")[0]['title'].replace(
                    ",", "").replace(' bytes', '')
                filepath = os.path.join(basepath, elem("td")[0].string)
                tor.data['Filelist'][filepath] = bytesize
            # Check if trumpable
            if soup.find(id="trumpable_%s" % tor.ID):
                tor.data['Trumpable'] = [
                    s.get_text() for s in soup.find(id="trumpable_%s" %
                                                    tor.ID).find_all('span')
                ]
            else:
                tor.data['Trumpable'] = []
Example 40
 def download(self):
     """Download the torrent contents"""
     req = session.base_get("torrents.php",
                            params={'action': 'download',
                                    'id': self.ID})
     return req.content
Example 41
 def get_new_messages(self):
     """Update the number of messages"""
     soup = bs4(session.base_get("inbox.php").text, "html.parser")
     self.new_messages = self.__parse_new_messages(soup)
     return self.new_messages
Example 42
 def need_for_seed(self):
     """List torrents that need seeding"""
     data = Util.snarf_cover_view_data(session.base_get("needforseed.php").content)
     return [t['GroupingQualities'][0]['Torrents'][0] for t in data]