Example #1
    def __init__(self):
        super(Trakt, self).__init__()

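        # expose the Trakt OAuth helpers (authorization URL and credentials)
        # as API endpoints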
        addApiView('automation.trakt.auth_url', self.getAuthorizationUrl)
        addApiView('automation.trakt.credentials', self.getCredentials)

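        # refresh the stored token every 24 hours and once at app startup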
        fire_event('schedule.interval',
                   'updater.check',
                   self.refreshToken,
                   hours=24)
        add_event('app.load', self.refreshToken)
Example #2
    def refreshToken(self):

        token = self.conf('automation_oauth_token')
        refresh_token = self.conf('automation_oauth_refresh')
        if token and refresh_token:

            prop_name = 'last_trakt_refresh'
            last_refresh = int(Env.prop(prop_name, default=0))

            if last_refresh < time.time() - 4838400:  # refresh every 8 weeks
                log.debug('Refreshing trakt token')

                url = self.urls['refresh_token'] + '?token=' + refresh_token
                data = fire_event('cp.api_call',
                                  url,
                                  cache_timeout=0,
                                  single=True)
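                # a successful refresh returns new 'oauth' and 'refresh' tokens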
                if data and 'oauth' in data and 'refresh' in data:
                    log.debug('Oauth refresh: %s', data)
                    self.conf('automation_oauth_token',
                              value=data.get('oauth'))
                    self.conf('automation_oauth_refresh',
                              value=data.get('refresh'))
                    Env.prop(prop_name, value=int(time.time()))
                else:
                    log.error(
                        'Failed refreshing Trakt token, please re-register in settings'
                    )

        elif token and not refresh_token:
            log.error(
                'Refresh token is missing, please re-register Trakt for '
                'auto-refresh of the token in the future'
            )
Example #3
    def getMovie(self, url):
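        # the movie slug sits between '/ijw_' and the next '/' in the URL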
        name = split_string(split_string(url, '/ijw_')[-1], '/')[0]

        if name.startswith('ijw_'):
            name = name[4:]

        year_name = fire_event('scanner.name_year', name, single=True)

        return self.search(year_name.get('name'), year_name.get('year'))
Example #4
    def getChartList(self):
        cache_key = 'bluray.charts'
        movie_list = {
            'name': 'Blu-ray.com - New Releases',
            'url': self.display_url,
            'order': self.chart_order,
            'list': self.getCache(cache_key) or []
        }

        if not movie_list['list']:
            movie_ids = []
            max_items = 10
            rss_movies = self.getRSSData(self.rss_url)

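            # walk the RSS items, resolve each title/year via search() and
            # de-duplicate on IMDB id, stopping after max_items results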
            for movie in rss_movies:
                title = self.get_text_element(movie, 'title')
                name = title.lower().split('blu-ray')[0].strip('(').rstrip()
                description = self.get_text_element(movie, 'description')
                year = description.split('|')[1].strip('(').strip()

                if '/' in name:  # make sure it is not a double movie release
                    continue

                movie = self.search(name, year)

                if movie:

                    if movie.get('imdb') in movie_ids:
                        continue

                    is_movie = fire_event('movie.is_movie',
                                          identifier=movie.get('imdb'),
                                          single=True)
                    if not is_movie:
                        continue

                    movie_ids.append(movie.get('imdb'))
                    movie_list['list'].append(movie)
                    if len(movie_list['list']) >= max_items:
                        break

            if not movie_list['list']:
                return

            self.setCache(cache_key, movie_list['list'], timeout=259200)

        return [movie_list]
Example #5
    def getMovie(self, url):

        cookie = {'Cookie': 'welcomeScreen=welcome_screen'}

        try:
            data = self.urlopen(url, headers=cookie)
        except:
            return

        html = BeautifulSoup(data)
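        # read the movie name from the page's title meta, then split it
        # into name and year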
        name = html.find('meta', {'name': 'title'})['content'][:-9].strip()
        name_year = fire_event('scanner.name_year', name, single=True)
        name = name_year.get('name')
        year = name_year.get('year')

        return self.search(name, year)
Example #6
    def getIMDBids(self):

        movies = []
        retrieved_movies = self.getJsonData(self.url)

        if retrieved_movies:
            for movie in retrieved_movies:
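                # look up full info per IMDB id and keep only releases that
                # pass the minimal-movie check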
                imdb_id = movie.get('imdb_id')
                info = fire_event('movie.info',
                                  identifier=imdb_id,
                                  extended=False,
                                  merge=True)
                if self.isMinimalMovie(info):
                    movies.append(imdb_id)

        return movies
Example #7
    def getMovie(self, url):

        try:
            data = self.getUrl(url)
        except:
            return

        try:
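            # pull the page <title>, drop the site suffix after ' - Rotten'
            # and split the rest into name and year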
            title = re.findall('<title>(.*)</title>', data)
            title = title[0].split(' - Rotten')[0].replace(
                '&nbsp;', ' ').decode('unicode_escape')
            name_year = fire_event('scanner.name_year', title, single=True)

            name = name_year.get('name')
            year = name_year.get('year')

            if name and year:
                return self.search(name, year)

        except:
            log.error('Failed parsing page for title and year: %s', traceback.format_exc())
Example #8
    def getRequestHeaders(self):
        return {'User-Agent': fire_event('app.version', single=True)}
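
All of the examples above funnel their work through the same event helpers, fire_event and add_event. As a rough sketch of the registration-and-dispatch pattern they assume (the module-level registry and the simplified handling of single= here are illustrative assumptions, not the project's actual event bus):

    from collections import defaultdict

    # Minimal sketch of an add_event / fire_event pair; simplified for
    # illustration, not the real event bus used by the examples above.
    _handlers = defaultdict(list)


    def add_event(name, handler):
        # remember the handler under the event name
        _handlers[name].append(handler)


    def fire_event(name, *args, **kwargs):
        # call every registered handler; with single=True return only the
        # first result instead of the full list
        single = kwargs.pop('single', False)
        results = [handler(*args, **kwargs) for handler in _handlers[name]]
        if single:
            return results[0] if results else None
        return results


    # Usage, mirroring Example #1: register a callback for 'app.load'
    # and fire the event at startup.
    add_event('app.load', lambda: 'token refreshed')
    print(fire_event('app.load', single=True))  # -> 'token refreshed'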