Example #1
    def checkForUpdateWindows(self):
        try:
            data = urllib2.urlopen(self.downloads, timeout = self.timeout).read()
        except (IOError, URLError):
            log.error('Failed to open %s.' % self.downloads)
            return False

        try:
            html = BeautifulSoup(data)
            results = html.findAll('a', attrs = {'href':re.compile('/downloads/')})

            # Pick the first download link whose surrounding markup mentions Windows.
            downloadUrl = None
            for link in results:
                if 'windows' in str(link.parent).lower():
                    downloadUrl = 'http://github.com' + link.get('href').replace(' ', '%20')
                    break

            # No Windows build found, or it points at the release that is already installed.
            if not downloadUrl or 'r' + str(version.windows) in downloadUrl:
                return False

            return downloadUrl

        except AttributeError:
            log.debug('Nothing found.')

        return False
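The method above relies on class state (`self.downloads`, `self.timeout`) and module-level `log` and `version` objects that are not shown. As a rough standalone sketch of the same scrape-and-filter pattern, here is a version written against Python 3 and bs4 rather than the urllib2/BeautifulSoup 3 stack the project uses; the page URL and release tag in the usage comment are placeholders:

    import re
    from urllib.request import urlopen
    from urllib.error import URLError

    from bs4 import BeautifulSoup

    def find_windows_download(page_url, installed_release, timeout=10):
        """Return the first /downloads/ link whose surrounding markup mentions Windows."""
        try:
            data = urlopen(page_url, timeout=timeout).read()
        except (IOError, URLError):
            return None

        soup = BeautifulSoup(data, 'html.parser')
        for link in soup.find_all('a', href=re.compile('/downloads/')):
            # The platform check looks at the link's parent element, not the link itself.
            if 'windows' in str(link.parent).lower():
                download_url = 'http://github.com' + link.get('href').replace(' ', '%20')
                # Skip it when the link points at the release that is already installed.
                if installed_release in download_url:
                    return None
                return download_url
        return None

    # Hypothetical usage; both arguments are placeholders.
    # print(find_windows_download('https://example.org/downloads-page', 'r40'))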
Example #2
    def findViaAlternative(self, movie):
        results = {'480p':[], '720p':[], '1080p':[]}

        arguments = urlencode({
            's':movie
        })
        url = "%s?%s" % (self.backupUrl, arguments)
        log.info('Searching %s' % url)

        try:
            data = urllib2.urlopen(url, timeout = self.timeout).read()
        except (IOError, URLError):
            log.error('Failed to open %s.' % url)
            return results

        try:
            # Only the <div> blocks are parsed, to keep the soup small.
            tables = SoupStrainer('div')
            html = BeautifulSoup(data, parseOnlyThese = tables)

            # findAll with a text filter returns the matching strings, not the tags.
            resultTable = html.findAll('h2', text = re.compile(movie))

            for h2 in resultTable:
                if 'trailer' in h2.lower():
                    parent = h2.parent.parent.parent
                    trailerLinks = parent.findAll('a', text = re.compile('480p|720p|1080p'))
                    try:
                        for trailer in trailerLinks:
                            # Link text that isn't exactly one of the quality keys raises KeyError and is skipped.
                            results[trailer].insert(0, trailer.parent['href'])
                    except KeyError:
                        pass

        except AttributeError:
            log.debug('No trailers found via alternative.')

        return results
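For reference, a self-contained sketch of the same SoupStrainer-plus-text-search pattern against bs4 (Python 3), run on an inline HTML snippet instead of a fetched page; the markup and movie title are invented for illustration:

    import re
    from bs4 import BeautifulSoup, SoupStrainer

    HTML = """
    <div><div><div>
      <h2>Some Movie Trailer</h2>
      <p><a href="/some-movie-480p.mp4">480p</a>
         <a href="/some-movie-720p.mp4">720p</a>
         <a href="/some-movie-1080p.mp4">1080p</a></p>
    </div></div></div>
    """

    def find_trailers(html, movie):
        results = {'480p': [], '720p': [], '1080p': []}

        # Restrict parsing to <div> elements, mirroring the strainer above.
        soup = BeautifulSoup(html, 'html.parser', parse_only=SoupStrainer('div'))

        # A string filter returns matching text nodes; .parent climbs back to the tags.
        for title in soup.find_all(string=re.compile(movie)):
            if 'trailer' not in title.lower():
                continue
            container = title.parent.parent.parent
            for label in container.find_all('a', string=re.compile('480p|720p|1080p')):
                quality = label.get_text()
                if quality in results:
                    results[quality].insert(0, label['href'])
        return results

    print(find_trailers(HTML, 'Some Movie'))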
Example #3
File: eta.py Project: bjensen/CouchPotato
    def getDetails(self, id):
        url = self.detailUrl + str(id)

        log.info('Scanning %s.' % url)

        try:
            data = urllib2.urlopen(url, timeout = self.timeout).read()
        except (IOError, URLError):
            log.error('Failed to open %s.' % url)
            return False

        # Search for theater release
        theaterDate = 0
        try:
            theaterLink = SoupStrainer('a', href = re.compile(r'/month_theaters.html\?'))
            theater = BeautifulSoup(data, parseOnlyThese = theaterLink)
            theaterDate = int(time.mktime(parse(theater.a.contents[0]).timetuple()))
        except AttributeError:
            log.debug('No Theater release info found.')

        # Search for dvd release date
        dvdDate = 0
        try:
            try:
                dvdLink = SoupStrainer('a', href = re.compile(r'/month_video.html\?'))
                dvd = BeautifulSoup(data, parseOnlyThese = dvdLink)
                dvdDate = int(time.mktime(parse(dvd.a.contents[0]).timetuple()))
            except:
                # Any failure here just means there was no parsable date behind the link;
                # fall back to the left-column 'Released' text below.
                pass

            # Try left column
            if not dvdDate:
                dvdReleases = SoupStrainer('p', text = re.compile('Released'))
                dvd = BeautifulSoup(data, parseOnlyThese = dvdReleases)
                for date in dvd:
                    foundDate = int(time.mktime(parse(date.replace('Released', '')).timetuple()))
                    dvdDate = max(foundDate, dvdDate)

        except AttributeError:
            log.debug('No DVD release info found.')

        # Does it have a Blu-ray release?
        bluray = []
        try:
            bees = SoupStrainer('b')
            soup = BeautifulSoup(data, parseOnlyThese = bees)
            bluray = soup.findAll('b', text = re.compile('Blu-ray'))
        except AttributeError:
            log.info('No Bluray release info found.')

        dates = {
            'id': id,
            'dvd': dvdDate,
            'theater': theaterDate,
            'bluray': len(bluray) > 0
        }
        log.debug('Found: %s' % dates)
        return dates
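The date handling above reduces to `parse` plus `time.mktime`; `parse` is assumed here to be `dateutil.parser.parse`, which is what the unqualified call suggests. A small, self-contained illustration of that conversion, with made-up sample strings:

    import time
    from dateutil.parser import parse

    def to_timestamp(text):
        """Turn a human-readable release date into a Unix timestamp (local time)."""
        return int(time.mktime(parse(text).timetuple()))

    # The left-column fallback strips a 'Released' prefix before parsing,
    # then keeps the latest date it finds.
    dvd_date = 0
    for raw in ['Released June 18, 2010', 'Released December 3, 2010']:
        found = to_timestamp(raw.replace('Released', '').strip())
        dvd_date = max(found, dvd_date)
    print(dvd_date)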
Example #4
        arguments = urlencode({
            's':movie
        })
        url = "%s?%s" % (self.backupUrl, arguments)
        log.debug('Searching %s' % url)

        try:
            data = urllib2.urlopen(url, timeout = self.timeout).read()
        except (IOError, URLError) as e:
            log.debug('Failed to open %s. %s' % (url, e))
            return results

        try:
            tables = SoupStrainer('div')
            html = BeautifulSoup(data, parseOnlyThese = tables)
            resultTable = html.findAll('h2', text = re.compile(movie))

            for h2 in resultTable:
                if 'trailer' in h2.lower():
                    parent = h2.parent.parent.parent
                    trailerLinks = parent.findAll('a', text = re.compile('480p|720p|1080p'))
                    try:
                        for trailer in trailerLinks:
                            results[trailer].insert(0, trailer.parent['href'])
                    except KeyError:
                        pass

        except AttributeError:
            log.debug('No trailers found via alternative.')
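The query construction in this excerpt is plain urlencode string building. A tiny Python 3 equivalent of that step, with a placeholder base URL standing in for `self.backupUrl`:

    from urllib.parse import urlencode

    def build_search_url(base_url, movie):
        # Mirrors the "%s?%s" % (self.backupUrl, arguments) pattern above.
        return '%s?%s' % (base_url, urlencode({'s': movie}))

    print(build_search_url('https://example.org/search', 'Some Movie Title'))
    # -> https://example.org/search?s=Some+Movie+Title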