Example #1
0
    def getJsonData(self, url, **kwargs):
        """Fetch a url through the cache layer and decode it as JSON.

        The cache key combines the url hash with a hash of the optional
        'params' kwarg so different query params get separate entries.
        Returns the decoded object, or [] when the fetch failed or the
        payload was not valid JSON.
        """
        params = kwargs.get('params', {})
        cache_key = '%s%s' % (md5(url), md5('%s' % params))

        raw = self.getCache(cache_key, url, **kwargs)
        if not raw:
            return []

        try:
            return json.loads(raw)
        except:
            log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc()))
            return []
Example #2
0
    def getRSSData(self, url, item_path = 'channel/item', **kwargs):
        """Fetch a url through the cache layer and parse it as an XML feed.

        Returns the elements found under item_path, or [] when the fetch
        came back empty or the payload was not parseable XML.
        """
        cache_key = '%s%s' % (md5(url), md5('%s' % kwargs.get('params', {})))

        raw = self.getCache(cache_key, url, **kwargs)
        if raw and len(raw) > 0:
            try:
                tree = XMLTree.fromstring(raw)
                return self.getElements(tree, item_path)
            except:
                log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc()))

        return []
Example #3
0
    def getIMDBids(self):
        """Collect IMDB ids from the enabled Movies.io watchlist RSS feeds.

        Only entries added since the previous run are looked up; the newest
        seen pubDate is persisted per feed in an Env property so the cursor
        only moves forward. Returns a list of IMDB ids, or None when the
        provider is disabled.
        """

        if self.isDisabled():
            return

        movies = []

        enablers = self.conf('automation_urls_use').split(',')

        index = -1
        for rss_url in self.conf('automation_urls').split(','):

            index += 1
            # Fix: the enabler flags come from split(',') as strings, so a
            # '0' flag was truthy and a disabled feed was still processed.
            # Also guard against fewer flags than urls (IndexError).
            if index >= len(enablers) or not enablers[index].strip() or enablers[index].strip() == '0':
                continue

            prop_name = 'automation.moviesio.last_update.%s' % md5(rss_url)
            last_update = float(Env.prop(prop_name, default = 0))

            last_movie_added = 0
            try:
                cache_key = 'imdb.rss.%s' % md5(rss_url)

                rss_data = self.getCache(cache_key, rss_url, headers = {'Referer': ''})
                data = XMLTree.fromstring(rss_data)
                rss_movies = self.getElements(data, 'channel/item')

                for movie in rss_movies:
                    created = int(time.mktime(parse(self.getTextElement(movie, "pubDate")).timetuple()))

                    # Remember the newest entry so the cursor only advances.
                    if created > last_movie_added:
                        last_movie_added = created
                    if created <= last_update:
                        continue

                    nameyear = fireEvent('scanner.name_year', self.getTextElement(movie, "title"), single = True)
                    imdb = self.search(nameyear.get('name'), nameyear.get('year'), imdb_only = True)

                    if not imdb:
                        continue

                    movies.append(imdb)
            except ParseError:
                log.debug('Failed loading Movies.io watchlist, probably empty: %s', (rss_url))
            except:
                log.error('Failed loading Movies.io watchlist: %s %s', (rss_url, traceback.format_exc()))

            Env.prop(prop_name, last_movie_added)

        return movies
Example #4
0
    def decorated(*args, **kwargs):
        # Basic-auth guard around the wrapped handler `f`: enforced only when
        # BOTH a username and a password are configured; otherwise the request
        # passes through unauthenticated.
        auth = getattr(request, 'authorization')
        if Env.setting('username') and Env.setting('password'):
            # Credentials arrive latin1-decoded (WSGI convention); the password
            # is re-encoded in the app's encoding and md5-hashed before the
            # check — presumably the stored password is an md5 hash; verify.
            if (not auth or not check_auth(auth.username.decode('latin1'), md5(auth.password.decode('latin1').encode(Env.get('encoding'))))):
                return authenticate()

        return f(*args, **kwargs)
Example #5
0
    def getIMDBids(self):
        """Scan the Kinepolis RSS feed and return the IMDB ids of releases
        that could be matched by title (using the current year)."""

        if self.isDisabled():
            return

        movies = []

        cache_key = 'kinepolis.%s' % md5(self.rss_url)
        feed = XMLTree.fromstring(self.getCache(cache_key, self.rss_url))

        if feed is not None:
            for item in self.getElements(feed, 'channel/item'):
                # The feed carries no year, so assume the current one.
                release = {
                    'name': self.getTextElement(item, "title"),
                    'year': datetime.datetime.now().strftime("%Y"),
                }

                log.debug('Release found: %s.', release)
                result = self.getIMDBFromTitle(release['name'], release['year'])
                if result:
                    movies.append(result['imdb'])

        return movies
Example #6
0
    def getCache(self, cache_key, url=None, **kwargs):
        """Return the cached value for cache_key, optionally fetching url on a miss.

        The cache entry is looked up under md5(cache_key). On a miss with a
        url given, the url is fetched, stored for cache_timeout seconds
        (default 300) and returned. On fetch failure returns "" — unless
        show_error is falsy, in which case the exception is re-raised.
        """
        cache_key_md5 = md5(cache_key)
        cache = Env.get("cache").get(cache_key_md5)
        if cache:
            if not Env.get("dev"):
                log.debug("Getting cache %s", cache_key)
            return cache

        if url:
            try:

                # Pop the cache_timeout option so it isn't forwarded to urlopen.
                cache_timeout = 300
                if kwargs.get("cache_timeout"):
                    cache_timeout = kwargs.get("cache_timeout")
                    del kwargs["cache_timeout"]

                data = self.urlopen(url, **kwargs)
                if data:
                    self.setCache(cache_key, data, timeout=cache_timeout)
                return data
            except:
                # show_error=False means the caller wants to handle failures itself.
                if not kwargs.get("show_error", True):
                    raise

                return ""
Example #7
0
File: main.py Project: Arcylus/PBI
    def getIMDBids(self):
        """Parse the Blu-ray.com RSS feed and return the IMDB ids of single
        releases that satisfy the configured minimal requirements."""

        if self.isDisabled():
            return

        movies = []

        cache_key = 'bluray.%s' % md5(self.rss_url)
        rss_data = self.getCache(cache_key, self.rss_url)
        data = XMLTree.fromstring(rss_data)

        if data is None:
            return movies

        for item in self.getElements(data, 'channel/item'):
            name = self.getTextElement(item, "title").lower().split("blu-ray")[0].strip("(").rstrip()
            year = self.getTextElement(item, "description").split("|")[1].strip("(").strip()

            # A '/' in the title marks a double movie release; skip those.
            if name.find("/") != -1:
                continue

            if tryInt(year) < self.getMinimal('year'):
                continue

            imdb = self.search(name, year)
            if imdb and self.isMinimalMovie(imdb):
                movies.append(imdb['imdb'])

        return movies
Example #8
0
    def getIMDBids(self):
        """Collect IMDB ids from every enabled IMDB watchlist RSS feed.

        Returns the (possibly empty) list of ids, or None when the provider
        is disabled. Feeds whose url doesn't look like an IMDB rss url are
        skipped with an error.
        """

        if self.isDisabled():
            return

        movies = []

        enablers = self.conf('automation_urls_use').split(',')

        index = -1
        for rss_url in self.conf('automation_urls').split(','):

            index += 1
            # Fix: the enabler flags are strings from split(','), so a '0'
            # flag was truthy and a disabled feed was still processed. Also
            # guard against fewer flags than urls (IndexError).
            if index >= len(enablers) or not enablers[index].strip() or enablers[index].strip() == '0':
                continue
            elif 'rss.imdb' not in rss_url:
                log.error('This isn\'t the correct url.: %s', rss_url)
                continue

            try:
                cache_key = 'imdb.rss.%s' % md5(rss_url)

                rss_data = self.getCache(cache_key, rss_url)
                data = XMLTree.fromstring(rss_data)
                rss_movies = self.getElements(data, 'channel/item')

                for movie in rss_movies:
                    imdb = getImdb(self.getTextElement(movie, "link"))
                    movies.append(imdb)

            except:
                log.error('Failed loading IMDB watchlist: %s %s', (rss_url, traceback.format_exc()))

        return movies
Example #9
0
    def getCache(self, cache_key, url = None, **kwargs):
        """Return a cached value for cache_key, fetching url on a cache miss.

        The cache is bypassed for requests that carry a body or file upload
        ('data'/'files' kwargs), since those aren't safe to replay from
        cache. Fetched responses are stored for cache_timeout seconds
        (default 300; a timeout <= 0 disables storing). Returns '' when the
        fetch fails, unless show_error is falsy — then the exception is
        re-raised for the caller.
        """

        # Only plain GET-style requests are cacheable.
        use_cache = not len(kwargs.get('data', {})) > 0 and not kwargs.get('files')

        if use_cache:
            cache_key_md5 = md5(cache_key)
            cache = Env.get('cache').get(cache_key_md5)
            if cache:
                if not Env.get('dev'): log.debug('Getting cache %s', cache_key)
                return cache

        if url:
            try:

                # Pop the cache_timeout option so it isn't forwarded to urlopen.
                cache_timeout = 300
                if 'cache_timeout' in kwargs:
                    cache_timeout = kwargs.get('cache_timeout')
                    del kwargs['cache_timeout']

                data = self.urlopen(url, **kwargs)
                if data and cache_timeout > 0 and use_cache:
                    self.setCache(cache_key, data, timeout = cache_timeout)
                return data
            except:
                # show_error=False means the caller handles failures itself.
                if not kwargs.get('show_error', True):
                    raise

                log.debug('Failed getting cache: %s', (traceback.format_exc(0)))
                return ''
Example #10
0
    def suggestView(self, **kwargs):
        """API endpoint: return up to 'limit' movie suggestions.

        kwargs may carry comma-separated 'movies' and 'ignored' imdb-id
        strings; when absent they fall back to the active/done movies in
        the database and the persisted 'suggest_ignore' property.
        Suggestions are served from cache when available.
        """

        movies = splitString(kwargs.get('movies', ''))
        ignored = splitString(kwargs.get('ignored', ''))
        limit = kwargs.get('limit', 6)

        if not movies or len(movies) == 0:
            db = get_session()
            active_movies = db.query(Movie) \
                .filter(or_(*[Movie.status.has(identifier = s) for s in ['active', 'done']])).all()
            movies = [x.library.identifier for x in active_movies]

        if not ignored or len(ignored) == 0:
            ignored = splitString(Env.prop('suggest_ignore', default = ''))

        cached_suggestion = self.getCache('suggestion_cached')
        if cached_suggestion:
            suggestions = cached_suggestion
        else:
            suggestions = fireEvent('movie.suggest', movies = movies, ignore = ignored, single = True)
            # NOTE(review): the read above uses the raw key while this write
            # hashes it with md5(ss(...)) — verify getCache/setCache resolve
            # both to the same cache entry.
            self.setCache(md5(ss('suggestion_cached')), suggestions, timeout = 6048000) # Cache for 10 weeks

        return {
            'success': True,
            'count': len(suggestions),
            'suggestions': suggestions[:limit]
        }
Example #11
0
File: base.py Project: Arcylus/PBI
    def getCache(self, cache_key, url = None, **kwargs):
        """Return the cached value stored under md5(ss(cache_key)), fetching
        url on a miss.

        Fetched data is cached for cache_timeout seconds (default 300) and
        returned. On fetch failure returns '' — unless show_error is falsy,
        in which case the exception propagates to the caller.
        """
        cache_key = md5(ss(cache_key))
        cache = Env.get('cache').get(cache_key)
        if cache:
            if not Env.get('dev'): log.debug('Getting cache %s', cache_key)
            return cache

        if url:
            try:

                # Pop the cache_timeout option so it isn't forwarded to urlopen.
                cache_timeout = 300
                if kwargs.get('cache_timeout'):
                    cache_timeout = kwargs.get('cache_timeout')
                    del kwargs['cache_timeout']

                data = self.urlopen(url, **kwargs)

                if data:
                    self.setCache(cache_key, data, timeout = cache_timeout)
                return data
            except:
                # show_error=False means the caller handles failures itself.
                if not kwargs.get('show_error', True):
                    raise

                return ''
Example #12
0
    def getCache(self, cache_key, url=None, **kwargs):
        """Return a cached value for cache_key, fetching url on a cache miss.

        The cache is bypassed for requests that carry a body or file upload
        ("data"/"files" kwargs), since those aren't safe to replay. Fetched
        responses are stored for cache_timeout seconds (default 300; <= 0
        disables storing). Returns "" when the fetch fails, unless
        show_error is falsy — then the exception is re-raised.
        """

        # Only plain GET-style requests are cacheable.
        use_cache = not len(kwargs.get("data", {})) > 0 and not kwargs.get("files")

        if use_cache:
            cache_key_md5 = md5(cache_key)
            cache = Env.get("cache").get(cache_key_md5)
            if cache:
                if not Env.get("dev"):
                    log.debug("Getting cache %s", cache_key)
                return cache

        if url:
            try:

                # Pop the cache_timeout option so it isn't forwarded to urlopen.
                cache_timeout = 300
                if "cache_timeout" in kwargs:
                    cache_timeout = kwargs.get("cache_timeout")
                    del kwargs["cache_timeout"]

                data = self.urlopen(url, **kwargs)
                if data and cache_timeout > 0 and use_cache:
                    self.setCache(cache_key, data, timeout=cache_timeout)
                return data
            except:
                # show_error=False means the caller handles failures itself.
                if not kwargs.get("show_error", True):
                    raise

                log.debug("Failed getting cache: %s", (traceback.format_exc(0)))
                return ""
Example #13
0
File: main.py Project: Arcylus/PBI
    def getIMDBids(self):
        """Scrape IMDB ids out of every enabled automation url and return
        them as a list (None when the provider is disabled)."""

        if self.isDisabled():
            return

        movies = []

        enablers = [tryInt(x) for x in splitString(self.conf('automation_urls_use'))]
        urls = splitString(self.conf('automation_urls'))

        for index, url in enumerate(urls):

            if not enablers[index]:
                continue

            try:
                cache_key = 'imdb.rss.%s' % md5(url)
                page = self.getCache(cache_key, url)
                # getImdb extracts every imdb id found in the page.
                movies.extend(getImdb(page, multiple = True))

            except:
                log.error('Failed loading IMDB watchlist: %s %s', (url, traceback.format_exc()))

        return movies
Example #14
0
    def updateSuggestionCache(self, ignore_imdb = None, limit = 6, ignored = None):
        """Refresh the cached movie suggestions.

        Drops ignore_imdb from the currently cached suggestions and, when
        fewer than limit remain, asks for fresh ones (excluding the ignored
        ids). The merged list is cached for 10 weeks and returned.
        """

        # Fix: 'ignored' defaults to None, which made list(set(ignored))
        # below raise TypeError; normalize it to a list first.
        if ignored is None:
            ignored = []

        # Combine with previous suggestion_cache
        cached_suggestion = self.getCache('suggestion_cached')
        new_suggestions = []

        if ignore_imdb:
            # Keep everything except the id we were asked to drop.
            for cs in cached_suggestion:
                if cs.get('imdb') != ignore_imdb:
                    new_suggestions.append(cs)

        # Get new results and add them
        if len(new_suggestions) - 1 < limit:

            db = get_session()
            active_movies = db.query(Movie) \
                .filter(or_(*[Movie.status.has(identifier = s) for s in ['active', 'done']])).all()
            movies = [x.library.identifier for x in active_movies]

            if ignored:
                # Don't re-suggest what we already have.
                ignored.extend([x.get('imdb') for x in new_suggestions])

            suggestions = fireEvent('movie.suggest', movies = movies, ignore = list(set(ignored)), single = True)

            if suggestions:
                new_suggestions.extend(suggestions)

        self.setCache(md5(ss('suggestion_cached')), new_suggestions, timeout = 6048000)

        return new_suggestions
Example #15
0
    def getIMDBids(self):
        """Collect IMDB ids from the enabled IMDB watchlist RSS feeds that
        were added since the previous run.

        The last-run timestamp is persisted per feed via an Env property.
        Returns a list of ids, or None when the provider is disabled.
        """

        if self.isDisabled():
            return

        movies = []

        enablers = self.conf('automation_urls_use').split(',')

        index = -1
        for rss_url in self.conf('automation_urls').split(','):

            index += 1
            # Fix: the enabler flags are strings from split(','), so a '0'
            # flag was truthy and a disabled feed was still processed. Also
            # guard against fewer flags than urls (IndexError).
            if index >= len(enablers) or not enablers[index].strip() or enablers[index].strip() == '0':
                continue
            elif 'rss.imdb' not in rss_url:
                log.error('This isn\'t the correct url.: %s' % rss_url)
                continue

            prop_name = 'automation.imdb.last_update.%s' % md5(rss_url)
            last_update = float(Env.prop(prop_name, default = 0))

            try:
                cache_key = 'imdb.rss.%s' % md5(rss_url)

                rss_data = self.getCache(cache_key, rss_url)
                data = XMLTree.fromstring(rss_data)
                rss_movies = self.getElements(data, 'channel/item')

                for movie in rss_movies:
                    created = int(time.mktime(parse(self.getTextElement(movie, "pubDate")).timetuple()))
                    imdb = getImdb(self.getTextElement(movie, "link"))

                    if not imdb or created < last_update:
                        continue

                    movies.append(imdb)

            except:
                log.error('Failed loading IMDB watchlist: %s %s' % (rss_url, traceback.format_exc()))

            Env.prop(prop_name, time.time())

        return movies
Example #16
0
    def download(self, data, movie, manual = False):
        """Send a release to the downloaders and mark it as snatched.

        data: release info dict (may carry its own 'download' callable).
        movie: the movie dict the release belongs to.
        manual: whether the snatch was user-initiated.
        Returns True on a successful hand-off, 'try_next' when the release's
        own download hook asks to skip to the next release, False when no
        downloader is enabled.
        """

        snatched_status = fireEvent('status.get', 'snatched', single = True)

        # Download movie to temp
        filedata = None
        if data.get('download') and (ismethod(data.get('download')) or isfunction(data.get('download'))):
            filedata = data.get('download')(url = data.get('url'), nzb_id = data.get('id'))
            if filedata == 'try_next':
                return filedata

        successful = fireEvent('download', data = data, movie = movie, manual = manual, filedata = filedata, single = True)

        if successful:

            try:
                # Mark release as snatched
                db = get_session()
                rls = db.query(Release).filter_by(identifier = md5(data['url'])).first()
                if rls:
                    rls.status_id = snatched_status.get('id')
                    db.commit()

                    log_movie = '%s (%s) in %s' % (getTitle(movie['library']), movie['library']['year'], rls.quality.label)
                    snatch_message = 'Snatched "%s": %s' % (data.get('name'), log_movie)
                    log.info(snatch_message)
                    fireEvent('movie.snatched', message = snatch_message, data = rls.to_dict())

                # If renamer isn't used, mark movie done
                if not Env.setting('enabled', 'renamer'):
                    active_status = fireEvent('status.get', 'active', single = True)
                    done_status = fireEvent('status.get', 'done', single = True)
                    try:
                        if movie['status_id'] == active_status.get('id'):
                            # Only finish the movie when the snatched quality is
                            # one of the profile's 'finish' qualities.
                            for profile_type in movie['profile']['types']:
                                if rls and profile_type['quality_id'] == rls.quality.id and profile_type['finish']:
                                    log.info('Renamer disabled, marking movie as finished: %s', log_movie)

                                    # Mark release done
                                    rls.status_id = done_status.get('id')
                                    db.commit()

                                    # Mark movie done
                                    mvie = db.query(Movie).filter_by(id = movie['id']).first()
                                    mvie.status_id = done_status.get('id')
                                    db.commit()
                    except:
                        log.error('Failed marking movie finished, renamer disabled: %s', traceback.format_exc())

            except:
                log.error('Failed marking movie finished: %s', traceback.format_exc())

            return True

        log.info('Tried to download, but none of the downloaders are enabled')
        return False
Example #17
0
    def getJsonData(self, url, **kwargs):
        """Fetch url through the cache layer and return the decoded JSON,
        or [] when the fetch failed or the payload wasn't valid JSON."""

        raw = self.getCache(md5(url), url, **kwargs)
        if not raw:
            return []

        try:
            return json.loads(raw)
        except:
            log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc()))
            return []
Example #18
0
    def guess(self, files, extra = None, loose = False):
        """Guess the release quality from a list of file names.

        files: file paths belonging to one release.
        extra: optional hints (e.g. resolution_width) used by the loose pass.
        loose: also try the unreliable checks (resolution, ext + filesize).
        Returns a quality dict, or None when nothing matched.
        """
        # Fix: mutable default argument, and 'extra is {}' — an identity test
        # against a fresh literal that is always False, so the cache was
        # never actually used.
        if extra is None:
            extra = {}

        # Create hash for cache
        hash = md5(str([f.replace('.' + getExt(f), '') for f in files]))
        cached = self.getCache(hash)
        if cached and not extra: return cached

        for cur_file in files:
            size = (os.path.getsize(cur_file) / 1024 / 1024) if os.path.isfile(cur_file) else 0
            words = re.split('\W+', cur_file.lower())

            for quality in self.all():

                # Check tags
                if quality['identifier'] in words:
                    log.debug('Found via identifier "%s" in %s' % (quality['identifier'], cur_file))
                    return self.setCache(hash, quality)

                if list(set(quality.get('alternative', [])) & set(words)):
                    log.debug('Found %s via alt %s in %s' % (quality['identifier'], quality.get('alternative'), cur_file))
                    return self.setCache(hash, quality)

                for tag in quality.get('tags', []):
                    if isinstance(tag, tuple) and '.'.join(tag) in '.'.join(words):
                        log.debug('Found %s via tag %s in %s' % (quality['identifier'], quality.get('tags'), cur_file))
                        return self.setCache(hash, quality)

                if list(set(quality.get('tags', [])) & set(words)):
                    log.debug('Found %s via tag %s in %s' % (quality['identifier'], quality.get('tags'), cur_file))
                    return self.setCache(hash, quality)

                # Check on unreliable stuff
                if loose:

                    # Last check on resolution only
                    if quality.get('width', 480) == extra.get('resolution_width', 0):
                        log.debug('Found %s via resolution_width: %s == %s' % (quality['identifier'], quality.get('width', 480), extra.get('resolution_width', 0)))
                        return self.setCache(hash, quality)

                    # Check extension + filesize
                    if list(set(quality.get('ext', [])) & set(words)) and size >= quality['size_min'] and size <= quality['size_max']:
                        log.debug('Found %s via ext and filesize %s in %s' % (quality['identifier'], quality.get('ext'), words))
                        return self.setCache(hash, quality)


        # Try again with loose testing
        if not loose:
            quality = self.guess(files, extra = extra, loose = True)
            if quality:
                return self.setCache(hash, quality)

        log.debug('Could not identify quality for: %s' % files)
        return None
Example #19
0
    def get(self):
        """Hand out the api key when the 'u'/'p' request arguments match the
        configured credentials (each check is skipped when unset)."""
        api = None
        username = Env.setting('username')
        password = Env.setting('password')

        # Evaluation order kept: 'u' is always read, 'p' only when 'u' passed.
        if self.get_argument('u') == md5(username) or not username:
            if self.get_argument('p') == password or not password:
                api = Env.setting('api_key')

        self.write({
            'success': api is not None,
            'api_key': api
        })
Example #20
0
    def call(self, method_url):
        """GET a Trakt API method (relative to the base url) and return the
        decoded JSON response, served through the provider cache.

        Sends HTTP basic auth when an automation password is configured.
        """

        if self.conf('automation_password'):
            headers = {
               'Authorization': "Basic %s" % base64.encodestring('%s:%s' % (self.conf('automation_username'), self.conf('automation_password')))[:-1]
            }
        else:
            headers = {}

        cache_key = 'trakt.%s' % md5(method_url)
        # NOTE(review): getCache returns '' on fetch failure, which would make
        # json.loads raise ValueError here — confirm callers handle that.
        json_string = self.getCache(cache_key, self.urls['base'] + method_url, headers = headers)
        return json.loads(json_string)
Example #21
0
    def getRSSData(self, url, item_path = 'channel/item', **kwargs):
        """Fetch url through the cache layer, parse it as XML and return the
        elements under item_path ([] on fetch/parse failure)."""

        raw = self.getCache(md5(url), url, **kwargs)
        if raw:
            try:
                return self.getElements(XMLTree.fromstring(raw), item_path)
            except:
                log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc()))

        return []
Example #22
0
def getApiKey():
    """Validate the 'u'/'p' request params against the configured
    credentials and return a JSON response carrying the api key on
    success (each check is skipped when the credential is unset)."""

    params = getParams()
    username = Env.setting('username')
    password = Env.setting('password')

    user_ok = params.get('u') == md5(username) or not username
    pass_ok = params.get('p') == password or not password
    api = Env.setting('api_key') if (user_ok and pass_ok) else None

    return jsonified({
        'success': api is not None,
        'api_key': api
    })
Example #23
0
    def download(self, url = '', dest = None, overwrite = False):
        """Download url to dest and return the destination path.

        When dest is None the file goes into the cache dir under an
        md5-based name. Returns False when the download fails.
        """

        if not dest: # to Cache
            dest = os.path.join(Env.get('cache_dir'), '%s.%s' % (md5(url), getExt(url)))

        # Fix: the url was fetched even when the file already existed and
        # overwrite was False, throwing the downloaded data away. Skip the
        # network round-trip entirely in that case.
        if not overwrite and os.path.isfile(dest):
            return dest

        try:
            filedata = self.urlopen(url)
        except:
            return False

        self.createFile(dest, filedata, binary = True)

        return dest
Example #24
0
    def getIMDBids(self):
        """Collect IMDB ids from the configured IMDB csv watchlist exports.

        Per url, only rows created since the previous run are returned; the
        run timestamp is persisted via an Env property per feed.
        """

        if self.isDisabled():
            return

        movies = []
        # Maps csv column name -> column index; filled from the FIRST feed's
        # header row and then reused for every url.
        # NOTE(review): if a second url uses a different column order this
        # map will be wrong for it — confirm all feeds share one layout.
        headers = {}

        for csv_url in self.conf('automation_urls').split(','):
            prop_name = 'automation.imdb.last_update.%s' % md5(csv_url)
            last_update = float(Env.prop(prop_name, default = 0))

            try:
                cache_key = 'imdb_csv.%s' % md5(csv_url)
                csv_data = self.getCache(cache_key, csv_url)
                csv_reader = csv.reader(StringIO.StringIO(csv_data))
                if not headers:
                    nr = 0
                    # .next() consumes the header row (Python 2 iterator API).
                    for column in csv_reader.next():
                        headers[column] = nr
                        nr += 1

                for row in csv_reader:
                    created = int(time.mktime(parse(row[headers['created']]).timetuple()))
                    if created < last_update:
                        continue

                    imdb = row[headers['const']]
                    if imdb:
                        movies.append(imdb)
            except:
                log.error('Failed loading IMDB watchlist: %s %s' % (csv_url, traceback.format_exc()))

            Env.prop(prop_name, time.time())


        return movies
Example #25
0
    def download(self, data, movie, manual=False):
        """Send a release to the downloaders and mark it as snatched.

        data: release info dict (may carry its own 'download' callable).
        movie: the movie dict the release belongs to.
        manual: whether the snatch was user-initiated.
        Returns True on a successful hand-off, 'try_next' when the release's
        own download hook asks to skip to the next release, None otherwise.
        """

        snatched_status = fireEvent("status.get", "snatched", single=True)

        # Download movie to temp
        filedata = None
        if data.get("download") and (ismethod(data.get("download")) or isfunction(data.get("download"))):
            filedata = data.get("download")(url=data.get("url"), nzb_id=data.get("id"))
            # Fix: was 'is "try_next"' — identity comparison against a string
            # literal, which is not guaranteed to match an equal string.
            if filedata == "try_next":
                return filedata

        successful = fireEvent("download", data=data, movie=movie, manual=manual, single=True, filedata=filedata)

        if successful:

            # Mark release as snatched
            db = get_session()
            rls = db.query(Release).filter_by(identifier=md5(data["url"])).first()
            # Fix: guard against a missing release row instead of crashing
            # with AttributeError on None.
            if rls:
                rls.status_id = snatched_status.get("id")
                db.commit()

                log_movie = "%s (%s) in %s" % (getTitle(movie["library"]), movie["library"]["year"], rls.quality.label)
                snatch_message = 'Snatched "%s": %s' % (data.get("name"), log_movie)
                log.info(snatch_message)
                fireEvent("movie.snatched", message=snatch_message, data=rls.to_dict())

            # If renamer isn't used, mark movie done
            if not Env.setting("enabled", "renamer"):
                active_status = fireEvent("status.get", "active", single=True)
                done_status = fireEvent("status.get", "done", single=True)
                try:
                    if movie["status_id"] == active_status.get("id"):
                        for profile_type in movie["profile"]["types"]:
                            if rls and profile_type["quality_id"] == rls.quality.id and profile_type["finish"]:
                                log.info("Renamer disabled, marking movie as finished: %s" % log_movie)

                                # Mark release done
                                rls.status_id = done_status.get("id")
                                db.commit()

                                # Mark movie done
                                mvie = db.query(Movie).filter_by(id=movie["id"]).first()
                                mvie.status_id = done_status.get("id")
                                db.commit()
                except Exception as e:
                    # Fix: 'except Exception, e' is Python-2-only syntax.
                    log.error("Failed marking movie finished: %s %s" % (e, traceback.format_exc()))

            # db.close()
            return True
Example #26
0
    def post(self, *args, **kwargs):
        """Login handler: check the submitted credentials, set the session
        cookie on success and redirect back to the web interface."""

        api = None

        username = Env.setting('username')
        password = Env.setting('password')

        # Evaluation order kept: 'username' is always read, 'password' only
        # when the username check passed.
        if self.get_argument('username') == username or not username:
            if md5(self.get_argument('password')) == password or not password:
                api = Env.setting('api_key')

        if api:
            remember_me = tryInt(self.get_argument('remember_me', default = 0))
            self.set_secure_cookie('user', api, expires_days = 30 if remember_me > 0 else None)

        self.redirect(Env.get('web_base'))
Example #27
0
    def download(self, url = '', dest = None, overwrite = False, urlopen_kwargs = None):
        """Download url to dest and return the destination path.

        When dest is None the file goes into the cache dir under an
        md5-based name; an existing file is reused unless overwrite is set.
        urlopen_kwargs are forwarded to self.urlopen. Returns False when the
        fetch fails.
        """
        # Fix: mutable default argument ({}) was shared across calls.
        if urlopen_kwargs is None:
            urlopen_kwargs = {}

        if not dest: # to Cache
            dest = os.path.join(Env.get('cache_dir'), '%s.%s' % (md5(url), getExt(url)))

        if not overwrite and os.path.isfile(dest):
            return dest

        try:
            filedata = self.urlopen(url, **urlopen_kwargs)
        except:
            return False

        self.createFile(dest, filedata, binary = True)
        return dest
Example #28
0
    def download(self, url="", dest=None, overwrite=False):
        """Fetch url into dest (cache dir with an md5-based name when dest
        is None) and return the path; False when the request fails. An
        existing file is reused unless overwrite is set."""

        if not dest:  # to Cache
            dest = os.path.join(Env.get("cache_dir"), "%s.%s" % (md5(url), getExt(url)))

        already_present = os.path.isfile(dest)
        if already_present and not overwrite:
            return dest

        try:
            payload = self.urlopen(url)
        except:
            return False

        self.createFile(dest, payload, binary=True)
        return dest
Example #29
0
    def imdbMatch(self, url, imdbId):
        """Return True when url itself carries imdbId, or when the page it
        points at contains it; False otherwise (including fetch errors)."""
        if getImdb(url) == imdbId:
            return True

        # Only fetch actual http(s) links.
        if url[:4] != 'http':
            return False

        try:
            page = self.getCache(md5(url), url)
        except IOError:
            log.error('Failed to open %s.', url)
            return False

        return getImdb(page) == imdbId
Example #30
0
    def download(self, data = None, media = None, filedata = None):
        """Upload an NZB to NZBGet over XML-RPC.

        data: release info dict (url, name, ...). media: the movie dict.
        filedata: raw NZB contents to send. Returns the downloadReturnId
        result on success, False on any connection/auth/queue failure.
        """
        if not media: media = {}
        if not data: data = {}

        if not filedata:
            log.error('Unable to get NZB file: %s', traceback.format_exc())
            return False

        log.info('Sending "%s" to NZBGet.', data.get('name'))

        nzb_name = ss('%s.nzb' % self.createNzbName(data, media))

        url = cleanHost(host = self.conf('host'), ssl = self.conf('ssl'), username = self.conf('username'), password = self.conf('password')) + self.rpc
        rpc = xmlrpclib.ServerProxy(url)

        try:
            # writelog doubles as a connectivity/authentication probe.
            if rpc.writelog('INFO', 'CouchPotato connected to drop off %s.' % nzb_name):
                log.debug('Successfully connected to NZBGet')
            else:
                log.info('Successfully connected to NZBGet, but unable to send a message')
        except socket.error:
            log.error('NZBGet is not responding. Please ensure that NZBGet is running and host setting is correct.')
            return False
        except xmlrpclib.ProtocolError as e:
            if e.errcode == 401:
                log.error('Password is incorrect.')
            else:
                log.error('Protocol Error: %s', e)
            return False

        # NZBGet 0.x 'append' takes no priority argument; newer versions do.
        if re.search(r"^0", rpc.version()):
            xml_response = rpc.append(nzb_name, self.conf('category'), False, standard_b64encode(filedata.strip()))
        else:
            xml_response = rpc.append(nzb_name, self.conf('category'), tryInt(self.conf('priority')), False, standard_b64encode(filedata.strip()))

        if xml_response:
            log.info('NZB sent successfully to NZBGet')
            nzb_id = md5(data['url'])  # about as unique as they come ;)
            couchpotato_id = "couchpotato=" + nzb_id
            groups = rpc.listgroups()
            # Tag the queued item with our id so it can be matched up later.
            file_id = [item['LastID'] for item in groups if item['NZBFilename'] == nzb_name]
            confirmed = rpc.editqueue("GroupSetParameter", 0, couchpotato_id, file_id)
            if confirmed:
                log.debug('couchpotato parameter set in nzbget download')
            return self.downloadReturnId(nzb_id)
        else:
            log.error('NZBGet could not add %s to the queue.', nzb_name)
            return False
Example #31
0
    def getIMDBids(self):
        """Look up IMDB ids for movies found in the enabled iTunes RSS feeds.

        Entries are matched by name/year through self.search and filtered by
        the minimal-movie requirements. Returns a list of ids, or None when
        the provider is disabled.
        """

        if self.isDisabled():
            return

        movies = []

        enablers = [tryInt(x) for x in splitString(self.conf('automation_urls_use'))]
        urls = splitString(self.conf('automation_urls'))

        # XML namespaces used by the iTunes Atom feed.
        namespace = 'http://www.w3.org/2005/Atom'
        namespaceIM = 'http://itunes.apple.com/rss'

        index = -1
        for url in urls:

            index += 1
            if not enablers[index]:
                continue

            try:
                cache_key = 'itunes.rss.%s' % md5(url)
                rss_data = self.getCache(cache_key, url)

                data = XMLTree.fromstring(rss_data)

                if data is not None:
                    entry_tag = str(QName(namespace, 'entry'))
                    rss_movies = self.getElements(data, entry_tag)

                    for movie in rss_movies:
                        name_tag = str(QName(namespaceIM, 'name'))
                        name = self.getTextElement(movie, name_tag)

                        releaseDate_tag = str(QName(namespaceIM, 'releaseDate'))
                        releaseDateText = self.getTextElement(movie, releaseDate_tag)
                        # NOTE(review): the UTC offset '-07:00' is hard-coded in
                        # the format; entries published with a different offset
                        # will fail to parse — confirm the feed is stable.
                        year = datetime.datetime.strptime(releaseDateText, '%Y-%m-%dT00:00:00-07:00').strftime("%Y")

                        imdb = self.search(name, year)

                        if imdb and self.isMinimalMovie(imdb):
                            movies.append(imdb['imdb'])

            except:
                log.error('Failed loading iTunes rss feed: %s %s', (url, traceback.format_exc()))

        return movies
Example #32
0
    def guess(self, files, extra=None):
        """Guess the release quality from a list of file names.

        files: file paths belonging to one release.
        extra: optional hints forwarded to the loose fallback pass.
        Returns a quality dict, or None when nothing matched.
        """
        # Fix: mutable default argument, and 'extra is {}' — an identity test
        # against a fresh literal that is always False, so the cache was
        # never actually used.
        if extra is None:
            extra = {}

        # Create hash for cache
        hash = md5(str([f.replace('.' + getExt(f), '') for f in files]))
        cached = self.getCache(hash)
        if cached and not extra: return cached

        for cur_file in files:
            size = (os.path.getsize(cur_file) / 1024 /
                    1024) if os.path.isfile(cur_file) else 0
            words = re.split('\W+', cur_file.lower())

            for quality in self.all():

                # Check tags
                if quality['identifier'] in words:
                    log.debug('Found via identifier "%s" in %s',
                              (quality['identifier'], cur_file))
                    return self.setCache(hash, quality)

                if list(set(quality.get('alternative', [])) & set(words)):
                    log.debug('Found %s via alt %s in %s',
                              (quality['identifier'],
                               quality.get('alternative'), cur_file))
                    return self.setCache(hash, quality)

                for tag in quality.get('tags', []):
                    if isinstance(tag,
                                  tuple) and '.'.join(tag) in '.'.join(words):
                        log.debug('Found %s via tag %s in %s',
                                  (quality['identifier'], quality.get('tags'),
                                   cur_file))
                        return self.setCache(hash, quality)

                if list(set(quality.get('tags', [])) & set(words)):
                    log.debug(
                        'Found %s via tag %s in %s',
                        (quality['identifier'], quality.get('tags'), cur_file))
                    return self.setCache(hash, quality)

        # Try again with loose testing
        quality = self.guessLoose(hash, extra=extra)
        if quality:
            return self.setCache(hash, quality)

        log.debug('Could not identify quality for: %s', files)
        return None
Example #33
0
    def download(self, url = '', dest = None, overwrite = False, urlopen_kwargs = None):
        """Download `url` to `dest` (the cache dir when omitted).

        Returns the destination path, or False when the download fails.
        An existing destination file is reused unless `overwrite` is set.
        """
        urlopen_kwargs = urlopen_kwargs or {}

        # Default destination: cache directory, keyed on the url hash.
        if not dest:
            cache_name = '%s.%s' % (md5(url), getExt(url))
            dest = os.path.join(Env.get('cache_dir'), cache_name)

        # Reuse an already-downloaded file.
        if os.path.isfile(dest) and not overwrite:
            return dest

        try:
            filedata = self.urlopen(url, **urlopen_kwargs)
        except:
            log.error('Failed downloading file %s: %s', (url, traceback.format_exc()))
            return False

        self.createFile(dest, filedata, binary = True)
        return dest
Example #34
0
    def createReleases(self, search_results, media, quality_type):
        """Store search results as Release rows and return their identifiers.

        Each result is keyed by the md5 of its url; an existing row has its
        old ReleaseInfo rows deleted and its last_edit bumped, a new row
        starts in the 'available' status. Scalar fields of each result are
        persisted as ReleaseInfo records. Returns the list of md5
        identifiers seen in `search_results`.
        """

        available_status = fireEvent('status.get', ['available'], single=True)
        db = get_session()

        found_releases = []

        for rel in search_results:

            # The md5 of the url doubles as the unique release identifier.
            nzb_identifier = md5(rel['url'])
            found_releases.append(nzb_identifier)

            rls = db.query(Release).filter_by(
                identifier=nzb_identifier).first()
            if not rls:
                rls = Release(
                    identifier=nzb_identifier,
                    movie_id=media.get('id'),
                    #media_id = media.get('id'),
                    quality_id=quality_type.get('quality_id'),
                    status_id=available_status.get('id'))
                db.add(rls)
            else:
                # Re-found release: drop stale info rows, refresh timestamp.
                [db.delete(old_info) for old_info in rls.info]
                rls.last_edit = int(time.time())

            db.commit()

            for info in rel:
                try:
                    # Only scalar values can be stored as ReleaseInfo.
                    if not isinstance(rel[info],
                                      (str, unicode, int, long, float)):
                        continue

                    rls_info = ReleaseInfo(identifier=info,
                                           value=toUnicode(rel[info]))
                    rls.info.append(rls_info)
                except InterfaceError:
                    log.debug('Couldn\'t add %s to ReleaseInfo: %s',
                              (info, traceback.format_exc()))

            db.commit()

            # Reflect the stored status back onto the in-memory result.
            rel['status_id'] = rls.status_id

        return found_releases
Example #35
0
    def getJsonData(self, url, decode_from=None, **kwargs):
        """Fetch `url` through the cache and parse it as JSON.

        Optionally decodes the payload from `decode_from` first.
        Returns [] when the fetch is empty or parsing fails.
        """

        payload = self.getCache(md5(url), url, **kwargs)
        if not payload:
            return []

        try:
            payload = payload.strip()
            if decode_from:
                payload = payload.decode(decode_from)
            return json.loads(payload)
        except:
            log.error('Failed to parsing %s: %s',
                      (self.getName(), traceback.format_exc()))

        return []
Example #36
0
    def guess(self, files, extra=None):
        """Score every known quality against the file names and return the best.

        Accumulates tag-based and loose scores per quality; returns the
        highest-scoring quality dict (cached per file set) or None when
        nothing scored above zero.
        """
        if not extra: extra = {}

        # Cache key is built from the file names minus their extensions.
        cache_key = md5(str([f.replace('.' + getExt(f), '') for f in files]))
        cached = self.getCache(cache_key)
        if cached and len(extra) == 0:
            return cached

        qualities = self.all()

        # Every quality starts with a zero score.
        score = dict((q.get('identifier'), 0) for q in qualities)

        for cur_file in files:
            words = re.split(r'\W+', cur_file.lower())
            for quality in qualities:
                self.calcScore(score, quality,
                               self.containsTagScore(quality, words, cur_file))

        # Second pass: loose matching.
        for quality in qualities:
            self.calcScore(score, quality,
                           self.guessLooseScore(quality, files=files, extra=extra))

        # Bail out when no quality scored at all.
        if not any(points > 0 for points in score.values()):
            return None

        best = max(score, key=score.get)
        if best:
            for quality in qualities:
                if quality.get('identifier') == best:
                    return self.setCache(cache_key, quality)

        return None
Example #37
0
    def get(self, *args, **kwargs):
        """Hand out the api key when the supplied credentials match settings.

        Writes {'success': bool, 'api_key': key-or-None}; an unset username
        or password disables that particular check.
        """
        api_key = None

        try:
            username = Env.setting('username')
            password = Env.setting('password')

            # Nested checks keep the original evaluation order: the password
            # argument is only read when the username check passed.
            matched = False
            if self.get_argument('u') == md5(username) or not username:
                if self.get_argument('p') == password or not password:
                    matched = True

            if matched:
                api_key = Env.setting('api_key')

            self.write({
                'success': api_key is not None,
                'api_key': api_key,
            })
        except:
            log.error('Failed doing key request: %s', (traceback.format_exc()))
            self.write({'success': False, 'error': 'Failed returning results'})
Example #38
0
    def getRSSData(self, url, item_path='channel/item', **kwargs):
        """Fetch `url` through the cache and return the RSS items at `item_path`.

        Parsing is retried once on the `ss()`-normalized string; returns []
        when the feed is empty or both parses fail.
        """

        raw = self.getCache(md5(url), url, **kwargs)

        if raw and len(raw) > 0:
            try:
                tree = XMLTree.fromstring(raw)
                return self.getElements(tree, item_path)
            except:
                # Second chance with the simplified/encoded payload.
                try:
                    tree = XMLTree.fromstring(ss(raw))
                    return self.getElements(tree, item_path)
                except:
                    log.error('Failed to parsing %s: %s',
                              (self.getName(), traceback.format_exc()))

        return []
Example #39
0
    def call(self, method_url):
        """GET a trakt API endpoint (via the cache) and return parsed JSON.

        Adds HTTP Basic credentials when a password is configured.
        Returns [] on any failure.
        """

        try:
            headers = {}
            if self.conf('automation_password'):
                credentials = '%s:%s' % (self.conf('automation_username'), self.conf('automation_password'))
                # encodestring appends a trailing newline; strip it off.
                headers['Authorization'] = 'Basic %s' % base64.encodestring(credentials)[:-1]

            cache_key = 'trakt.%s' % md5(method_url)
            json_string = self.getCache(cache_key, self.urls['base'] + method_url, headers = headers)
            if json_string:
                return json.loads(json_string)
        except:
            log.error('Failed to get data from trakt, check your login.')

        return []
Example #40
0
    def post(self, *args, **kwargs):
        """Log the user in: set the session cookie and redirect to the web root.

        An unset username or password disables that particular check.
        """

        username = Env.setting('username')
        password = Env.setting('password')

        api_key = None
        # Nested checks preserve short-circuiting: the password argument is
        # only hashed when the username check passed.
        if self.get_argument('username') == username or not username:
            if md5(self.get_argument('password')) == password or not password:
                api_key = Env.setting('api_key')

        if api_key:
            # 'remember_me' keeps the cookie for 30 days instead of the session.
            remember_me = tryInt(self.get_argument('remember_me', default=0))
            self.set_secure_cookie('user', api_key,
                                   expires_days=30 if remember_me > 0 else None)

        self.redirect(Env.get('web_base'))
Example #41
0
    def download(self, url='', dest=None, overwrite=False):
        """Download `url` to `dest` (the cache dir when omitted).

        Returns the destination path, or False when the fetched payload is
        empty. An existing destination is kept unless `overwrite` is set.
        """

        filedata = self.urlopen(url)
        if not filedata:
            log.error('File is empty, don\'t download')
            return False

        if not dest:  # to Cache
            dest = os.path.join(Env.get('cache_dir'),
                                '%s.%s' % (md5(url), getExt(url)))

        if overwrite or not os.path.exists(dest):
            log.debug('Writing file to: %s' % dest)
            # `with` guarantees the handle is closed even when the write
            # raises (the original leaked it) and avoids shadowing `file`.
            with open(dest, 'wb') as output:
                output.write(filedata)
        else:
            log.debug('File already exists: %s' % dest)

        return dest
Example #42
0
    def download(self, url='', dest=None, overwrite=False):
        """Download `url` with urllib2 into `dest`.

        When `dest` is omitted the file goes to the cache directory, named
        by the url's md5 plus its original extension. Returns the
        destination path, or None (after logging) when anything raises.
        """

        try:
            file = urllib2.urlopen(url)

            if not dest:  # to Cache
                dest = os.path.join(Env.get('cache_dir'),
                                    '%s.%s' % (md5(url), getExt(url)))

            # Only write when the target is missing or overwriting is allowed.
            if overwrite or not os.path.exists(dest):
                log.debug('Writing file to: %s' % dest)
                output = open(dest, 'wb')
                output.write(file.read())
                output.close()
            else:
                log.debug('File already exists: %s' % dest)

            return dest

        except Exception, e:
            log.error('Unable to download file "%s": %s' % (url, e))
Example #43
0
        def require_basic_auth(handler, kwargs):
            """Tornado hook: enforce HTTP Basic auth when credentials are set.

            Returns True when the request may proceed, False after a 401
            challenge has been written to the handler.
            """
            if Env.setting('username') and Env.setting('password'):

                # Default to empty credentials so a missing or malformed
                # header fails the auth check instead of raising NameError
                # (the original left these unbound in that case).
                username = password = ''

                auth_header = handler.request.headers.get('Authorization')
                auth_decoded = base64.decodestring(
                    auth_header[6:]) if auth_header else None
                if auth_decoded:
                    # Split on the first ':' only — the password itself may
                    # contain colons (the original split twice and could
                    # raise ValueError on unpack).
                    username, password = auth_decoded.split(':', 1)

                if auth_header is None or not auth_header.startswith(
                        'Basic ') or (not check_auth(
                            username.decode('latin'),
                            md5(password.decode('latin')))):
                    handler.set_status(401)
                    handler.set_header('WWW-Authenticate',
                                       'Basic realm="CouchPotato Login"')
                    handler._transforms = []
                    handler.finish()

                    return False

            return True
Example #44
0
    def getIMDBids(self):
        """Collect imdb info dicts from the configured Movies.io watchlists.

        Returns None when disabled, otherwise a list of imdb result dicts.
        """

        if self.isDisabled():
            return

        movies = []

        # Cast to int: a '0' entry is a truthy *string*, so the original
        # `if not enablers[index]` never skipped disabled feeds (compare the
        # tryInt-based handling used by the other automation providers).
        enablers = [tryInt(x) for x in self.conf('automation_urls_use').split(',')]

        index = -1
        for rss_url in self.conf('automation_urls').split(','):

            index += 1
            # Skip disabled feeds; also guard a short enablers list.
            if index >= len(enablers) or not enablers[index]:
                continue

            try:
                cache_key = 'imdb.rss.%s' % md5(rss_url)

                rss_data = self.getCache(cache_key, rss_url, headers = {'Referer': ''})
                data = XMLTree.fromstring(rss_data)
                rss_movies = self.getElements(data, 'channel/item')

                for movie in rss_movies:

                    nameyear = fireEvent('scanner.name_year', self.getTextElement(movie, "title"), single = True)
                    imdb = self.search(nameyear.get('name'), nameyear.get('year'), imdb_only = True)

                    if not imdb:
                        continue

                    movies.append(imdb)
            except ParseError:
                log.debug('Failed loading Movies.io watchlist, probably empty: %s', (rss_url))
            except:
                log.error('Failed loading Movies.io watchlist: %s %s', (rss_url, traceback.format_exc()))

        return movies
Example #45
0
    def download(self, data, movie):
        """Send a release to the downloaders and mark it snatched on success.

        Returns True when a downloader accepted the release, False otherwise.
        """

        snatched_status = fireEvent('status.get', 'snatched', single=True)

        successful = fireEvent('download', data=data, movie=movie, single=True)

        if successful:

            # Mark release as snatched
            db = get_session()
            rls = db.query(Release).filter_by(
                identifier=md5(data['url'])).first()
            # The release row may have disappeared while downloading; the
            # original crashed with AttributeError on a missing row.
            if rls:
                rls.status_id = snatched_status.get('id')
                db.commit()

                log.info('Downloading of %s successful.' % data.get('name'))
                fireEvent('movie.snatched',
                          message='Downloading of %s successful.' %
                          data.get('name'),
                          data=rls.to_dict())

            return True

        return False
Example #46
0
File: main.py Project: Arcylus/PBI
    def getIMDBids(self):
        """Return imdb ids for movies currently in the Kinepolis RSS feed."""

        if self.isDisabled():
            return

        movies = []

        cache_key = 'kinepolis.%s' % md5(self.rss_url)
        data = XMLTree.fromstring(self.getCache(cache_key, self.rss_url))

        if data is None:
            return movies

        # The feed carries no year information, so assume the current year.
        year = datetime.datetime.now().strftime("%Y")

        for movie in self.getElements(data, 'channel/item'):
            name = self.getTextElement(movie, "title")
            imdb = self.search(name, year)

            if imdb and self.isMinimalMovie(imdb):
                movies.append(imdb['imdb'])

        return movies
Example #47
0
    def getCache(self, cache_key, url=None, **kwargs):
        """Return cached data for `cache_key`; on a miss, optionally fetch `url`.

        A successful fetch is stored under `cache_key` (default timeout 300s,
        overridable via the `cache_timeout` kwarg). On fetch failure returns
        '' unless the caller passed show_error=False, in which case the
        exception propagates.
        """
        cached = Env.get('cache').get(md5(cache_key))
        if cached:
            if not Env.get('dev'): log.debug('Getting cache %s', cache_key)
            return cached

        if not url:
            return

        try:
            # Pull the cache-specific kwarg out before handing the rest
            # to urlopen.
            cache_timeout = 300
            if kwargs.get('cache_timeout'):
                cache_timeout = kwargs.pop('cache_timeout')

            data = self.urlopen(url, **kwargs)
            if data:
                self.setCache(cache_key, data, timeout=cache_timeout)
            return data
        except:
            # show_error=False means the caller wants the raw exception.
            if not kwargs.get('show_error', True):
                raise

            return ''
Example #48
0
    def getIMDBids(self):
        """Return imdb ids for new releases in the Blu-ray.com RSS feed."""

        if self.isDisabled():
            return

        movies = []

        cache_key = 'bluray.%s' % md5(self.rss_url)
        rss_data = self.getCache(cache_key, self.rss_url)
        data = XMLTree.fromstring(rss_data)

        if data is not None:
            for movie in self.getElements(data, 'channel/item'):
                raw_title = self.getTextElement(movie, "title").lower()
                name = raw_title.split("blu-ray")[0].strip("(").rstrip()
                year = self.getTextElement(
                    movie, "description").split("|")[1].strip("(").strip()

                # A '/' in the title marks a multi-movie disc; skip those.
                if "/" in name:
                    continue

                # Too old to be of interest.
                if tryInt(year) < self.getMinimal('year'):
                    continue

                imdb = self.search(name, year)

                if imdb and self.isMinimalMovie(imdb):
                    movies.append(imdb['imdb'])

        return movies
Example #49
0
    def download(self, data, movie, manual=False):
        """Download a found release and update its status in the database.

        Checks that a downloader is enabled for the release type, optionally
        fetches the file via the provider's own `download` callable, hands it
        to the downloader chain, then marks the release 'snatched' (or
        'done' when the renamer is disabled, in which case finished movies
        are also marked done).

        Returns True on success, 'try_next' when the provider asks to fall
        through to the next result, False when no downloader is enabled.
        """

        # Test to see if any downloaders are enabled for this type
        downloader_enabled = fireEvent('download.enabled',
                                       manual,
                                       data,
                                       single=True)

        if downloader_enabled:

            snatched_status = fireEvent('status.get', 'snatched', single=True)

            # Download movie to temp
            filedata = None
            if data.get('download') and (ismethod(data.get('download'))
                                         or isfunction(data.get('download'))):
                filedata = data.get('download')(url=data.get('url'),
                                                nzb_id=data.get('id'))
                if filedata == 'try_next':
                    return filedata

            download_result = fireEvent('download',
                                        data=data,
                                        movie=movie,
                                        manual=manual,
                                        filedata=filedata,
                                        single=True)
            log.debug('Downloader result: %s', download_result)

            if download_result:
                try:
                    # Mark release as snatched
                    db = get_session()
                    rls = db.query(Release).filter_by(
                        identifier=md5(data['url'])).first()
                    if rls:
                        renamer_enabled = Env.setting('enabled', 'renamer')

                        done_status = fireEvent('status.get',
                                                'done',
                                                single=True)
                        # Without the renamer there is no later step, so the
                        # release is immediately 'done'; otherwise 'snatched'.
                        rls.status_id = done_status.get(
                            'id'
                        ) if not renamer_enabled else snatched_status.get('id')

                        # Save download-id info if returned
                        if isinstance(download_result, dict):
                            for key in download_result:
                                rls_info = ReleaseInfo(
                                    identifier='download_%s' % key,
                                    value=toUnicode(download_result.get(key)))
                                rls.info.append(rls_info)
                        db.commit()

                        log_movie = '%s (%s) in %s' % (getTitle(
                            movie['library']), movie['library']['year'],
                                                       rls.quality.label)
                        snatch_message = 'Snatched "%s": %s' % (
                            data.get('name'), log_movie)
                        log.info(snatch_message)
                        fireEvent('movie.snatched',
                                  message=snatch_message,
                                  data=rls.to_dict())

                        # If renamer isn't used, mark movie done
                        if not renamer_enabled:
                            active_status = fireEvent('status.get',
                                                      'active',
                                                      single=True)
                            done_status = fireEvent('status.get',
                                                    'done',
                                                    single=True)
                            try:
                                if movie['status_id'] == active_status.get(
                                        'id'):
                                    for profile_type in movie['profile'][
                                            'types']:
                                        if profile_type[
                                                'quality_id'] == rls.quality.id and profile_type[
                                                    'finish']:
                                            log.info(
                                                'Renamer disabled, marking movie as finished: %s',
                                                log_movie)

                                            # Mark release done
                                            rls.status_id = done_status.get(
                                                'id')
                                            rls.last_edit = int(time.time())
                                            db.commit()

                                            # Mark movie done
                                            mvie = db.query(Movie).filter_by(
                                                id=movie['id']).first()
                                            mvie.status_id = done_status.get(
                                                'id')
                                            mvie.last_edit = int(time.time())
                                            db.commit()
                            except:
                                log.error(
                                    'Failed marking movie finished, renamer disabled: %s',
                                    traceback.format_exc())

                except:
                    log.error('Failed marking movie finished: %s',
                              traceback.format_exc())

                return True

        log.info(
            'Tried to download, but none of the "%s" downloaders are enabled or gave an error',
            (data.get('type', '')))

        return False
Example #50
0
 def md5Password(self, value):
     """Return the md5 hash of `value`, or '' for an empty/falsy value."""
     if not value:
         return ''
     return md5(value)
Example #51
0
    def download(self, data, movie, manual=False):

        snatched_status = fireEvent('status.get', 'snatched', single=True)

        # Download movie to temp
        filedata = None
        if data.get('download') and (ismethod(data.get('download'))
                                     or isfunction(data.get('download'))):
            filedata = data.get('download')(url=data.get('url'),
                                            nzb_id=data.get('id'))
            if filedata is 'try_next':
                return filedata

        successful = fireEvent('download',
                               data=data,
                               movie=movie,
                               manual=manual,
                               filedata=filedata,
                               single=True)

        if successful:

            # Mark release as snatched
            db = get_session()
            rls = db.query(Release).filter_by(
                identifier=md5(data['url'])).first()
            rls.status_id = snatched_status.get('id')
            db.commit()

            log_movie = '%s (%s) in %s' % (getTitle(
                movie['library']), movie['library']['year'], rls.quality.label)
            snatch_message = 'Snatched "%s": %s' % (data.get('name'),
                                                    log_movie)
            log.info(snatch_message)
            fireEvent('movie.snatched',
                      message=snatch_message,
                      data=rls.to_dict())

            # If renamer isn't used, mark movie done
            if not Env.setting('enabled', 'renamer'):
                active_status = fireEvent('status.get', 'active', single=True)
                done_status = fireEvent('status.get', 'done', single=True)
                try:
                    if movie['status_id'] == active_status.get('id'):
                        for profile_type in movie['profile']['types']:
                            if profile_type[
                                    'quality_id'] == rls.quality.id and profile_type[
                                        'finish']:
                                log.info(
                                    'Renamer disabled, marking movie as finished: %s',
                                    log_movie)

                                # Mark release done
                                rls.status_id = done_status.get('id')
                                db.commit()

                                # Mark movie done
                                mvie = db.query(Movie).filter_by(
                                    id=movie['id']).first()
                                mvie.status_id = done_status.get('id')
                                db.commit()
                except Exception, e:
                    log.error('Failed marking movie finished: %s %s',
                              (e, traceback.format_exc()))

            #db.close()
            return True
Example #52
0
    def single(self, movie):
        """Search all providers for a single movie and snatch the best result.

        Walks the movie's profile quality types in order; for each quality
        that could already be released and has no better release snatched,
        fires 'yarr.search', stores every result as a Release row, and tries
        to download the highest-scoring one (respecting the nzb/torrent
        preference). Frontend notifications bracket the search. Returns True
        when something was snatched, otherwise False/None.
        """

        done_status = fireEvent('status.get', 'done', single=True)

        if not movie['profile'] or movie['status_id'] == done_status.get('id'):
            log.debug(
                'Movie doesn\'t have a profile or already done, assuming in manage tab.'
            )
            return

        db = get_session()

        pre_releases = fireEvent('quality.pre_releases', single=True)
        release_dates = fireEvent('library.update_release_date',
                                  identifier=movie['library']['identifier'],
                                  merge=True)
        available_status = fireEvent('status.get', 'available', single=True)
        ignored_status = fireEvent('status.get', 'ignored', single=True)

        default_title = getTitle(movie['library'])
        if not default_title:
            log.error(
                'No proper info found for movie, removing it from library to cause it from having more issues.'
            )
            fireEvent('movie.delete', movie['id'], single=True)
            return

        fireEvent('notify.frontend',
                  type='searcher.started.%s' % movie['id'],
                  data=True,
                  message='Searching for "%s"' % default_title)

        ret = False
        for quality_type in movie['profile']['types']:
            if not self.couldBeReleased(quality_type['quality']['identifier'],
                                        release_dates, pre_releases):
                log.info(
                    'Too early to search for %s, %s',
                    (quality_type['quality']['identifier'], default_title))
                continue

            has_better_quality = 0

            # See if better quality is available
            for release in movie['releases']:
                if release['quality']['order'] <= quality_type['quality'][
                        'order'] and release['status_id'] not in [
                            available_status.get('id'),
                            ignored_status.get('id')
                        ]:
                    has_better_quality += 1

            # Don't search for quality lower then already available.
            # NOTE(review): `is 0` relies on CPython small-int caching and
            # should be `== 0`; left untouched here.
            if has_better_quality is 0:

                log.info('Search for %s in %s',
                         (default_title, quality_type['quality']['label']))
                quality = fireEvent(
                    'quality.single',
                    identifier=quality_type['quality']['identifier'],
                    single=True)

                results = fireEvent('yarr.search', movie, quality, merge=True)

                sorted_results = sorted(results,
                                        key=lambda k: k['score'],
                                        reverse=True)
                if len(sorted_results) == 0:
                    log.debug(
                        'Nothing found for %s in %s',
                        (default_title, quality_type['quality']['label']))

                # Stable re-sort by type puts the preferred protocol first.
                download_preference = self.conf('preferred_method')
                if download_preference != 'both':
                    sorted_results = sorted(
                        sorted_results,
                        key=lambda k: k['type'],
                        reverse=(download_preference == 'torrent'))

                # Check if movie isn't deleted while searching
                if not db.query(Movie).filter_by(id=movie.get('id')).first():
                    break

                # Add them to this movie releases list
                for nzb in sorted_results:

                    rls = db.query(Release).filter_by(
                        identifier=md5(nzb['url'])).first()
                    if not rls:
                        rls = Release(
                            identifier=md5(nzb['url']),
                            movie_id=movie.get('id'),
                            quality_id=quality_type.get('quality_id'),
                            status_id=available_status.get('id'))
                        db.add(rls)
                        db.commit()
                    else:
                        [db.delete(info) for info in rls.info]
                        db.commit()

                    for info in nzb:
                        try:
                            # Only scalar values can be stored as ReleaseInfo.
                            if not isinstance(
                                    nzb[info],
                                (str, unicode, int, long, float)):
                                continue

                            rls_info = ReleaseInfo(identifier=info,
                                                   value=toUnicode(nzb[info]))
                            rls.info.append(rls_info)
                            db.commit()
                        except InterfaceError:
                            log.debug('Couldn\'t add %s to ReleaseInfo: %s',
                                      (info, traceback.format_exc()))

                    nzb['status_id'] = rls.status_id

                # Walk results best-first and try to snatch one.
                for nzb in sorted_results:
                    if nzb['status_id'] == ignored_status.get('id'):
                        log.info('Ignored: %s', nzb['name'])
                        continue

                    if nzb['score'] <= 0:
                        log.info('Ignored, score to low: %s', nzb['name'])
                        continue

                    downloaded = self.download(data=nzb, movie=movie)
                    if downloaded is True:
                        ret = True
                        break
                    elif downloaded != 'try_next':
                        break
            else:
                log.info(
                    'Better quality (%s) already available or snatched for %s',
                    (quality_type['quality']['label'], default_title))
                fireEvent('movie.restatus', movie['id'])
                break

            # Break if CP wants to shut down
            if self.shuttingDown() or ret:
                break

        fireEvent('notify.frontend',
                  type='searcher.ended.%s' % movie['id'],
                  data=True)

        #db.close()
        return ret
Example #53
0
 def getHTMLData(self, url, **kwargs):
     """Fetch `url` through the shared cache and return the raw HTML."""
     cache_key = md5(url)
     return self.getCache(cache_key, url, **kwargs)
Example #54
0
    def getHTMLData(self, url, **kwargs):
        """Fetch `url` through the cache, keyed on the url plus its params.

        Including the request params in the key keeps differently-
        parameterised requests to the same url from sharing a cache entry.
        """
        params = kwargs.get('params', {})
        cache_key = md5(url) + md5('%s' % params)
        return self.getCache(cache_key, url, **kwargs)
Example #55
0
class NZBGet(Downloader):
    """Downloader plugin that submits NZB files to an NZBGet server via XML-RPC."""

    # This downloader only handles usenet (.nzb) releases.
    protocol = ['nzb']

    # Path of NZBGet's XML-RPC endpoint, appended to the configured host URL.
    rpc = 'xmlrpc'

    def download(self, data=None, media=None, filedata=None):
        # Send the raw NZB contents in `filedata` to NZBGet's queue.
        # `data` holds release metadata (name, url); `media` the movie info.
        # Returns a downloadReturnId on success, False on any failure.
        if not media: media = {}
        if not data: data = {}

        # Without the actual NZB contents there is nothing to submit.
        if not filedata:
            log.error('Unable to get NZB file: %s', traceback.format_exc())
            return False

        log.info('Sending "%s" to NZBGet.', data.get('name'))

        nzb_name = ss('%s.nzb' % self.createNzbName(data, media))

        # Build the full XML-RPC URL including credentials from the config.
        url = cleanHost(host=self.conf('host'),
                        ssl=self.conf('ssl'),
                        username=self.conf('username'),
                        password=self.conf('password')) + self.rpc
        rpc = xmlrpclib.ServerProxy(url)

        # Probe the connection first with a harmless writelog call so auth /
        # connectivity problems are reported before the actual upload.
        try:
            if rpc.writelog('INFO', 'CouchPotato connected to drop off %s.' %
                            nzb_name):
                log.debug('Successfully connected to NZBGet')
            else:
                log.info(
                    'Successfully connected to NZBGet, but unable to send a message'
                )
        except socket.error:
            log.error(
                'NZBGet is not responding. Please ensure that NZBGet is running and host setting is correct.'
            )
            return False
        except xmlrpclib.ProtocolError, e:
            # 401 means the configured credentials were rejected.
            if e.errcode == 401:
                log.error('Password is incorrect.')
            else:
                log.error('Protocol Error: %s', e)
            return False

        # NZBGet changed the append() signature between major versions:
        # 0.x takes no priority argument, newer versions do.
        if re.search(r"^0", rpc.version()):
            xml_response = rpc.append(nzb_name, self.conf('category'), False,
                                      standard_b64encode(filedata.strip()))
        else:
            xml_response = rpc.append(nzb_name, self.conf('category'),
                                      tryInt(self.conf('priority')), False,
                                      standard_b64encode(filedata.strip()))

        if xml_response:
            log.info('NZB sent successfully to NZBGet')
            nzb_id = md5(data['url'])  # about as unique as they come ;)
            # Tag the queued item with a couchpotato=<id> parameter so the
            # status checker can find it again later.
            couchpotato_id = "couchpotato=" + nzb_id
            groups = rpc.listgroups()
            file_id = [
                item['LastID'] for item in groups
                if item['NZBFilename'] == nzb_name
            ]
            confirmed = rpc.editqueue("GroupSetParameter", 0, couchpotato_id,
                                      file_id)
            if confirmed:
                log.debug('couchpotato parameter set in nzbget download')
            return self.downloadReturnId(nzb_id)
        else:
            log.error('NZBGet could not add %s to the queue.', nzb_name)
            return False
Example #56
0
 def md5Password(self, value):
     """Return the md5 hash of `value` encoded in the app encoding, or '' when empty."""
     if not value:
         return ''
     return md5(value.encode(Env.get('encoding')))
Example #57
0
    def single(self, movie):
        """Search all wanted qualities for `movie` and snatch the best result.

        Walks the movie's profile quality types in order. For each quality
        that doesn't already have an equal-or-better release available or
        snatched, runs a provider search, registers every result as a
        Release (+ ReleaseInfo) row, then downloads the highest-scored one.

        :param movie: movie dict containing 'profile', 'library' and
                      'releases' (plus 'id').
        :return: result of self.download() for the best release, or False
                 when nothing was searched/snatched.
        """

        available_status = fireEvent('status.get', 'available', single = True)

        # Note: don't shadow the builtin `type` with the loop variable.
        for quality_type in movie['profile']['types']:

            has_better_quality = 0
            default_title = movie['library']['titles'][0]['title']

            # See if an equal-or-better quality is already available/snatched.
            # Compare status ids by value: `is` on ints only works by accident
            # for small interned values.
            for release in movie['releases']:
                if release['quality']['order'] <= quality_type['quality']['order'] \
                        and release['status_id'] != available_status.get('id'):
                    has_better_quality += 1

            # Don't search for quality lower than already available.
            if has_better_quality == 0:

                # Lazy log args (file convention) instead of eager %-formatting.
                log.info('Search for %s in %s',
                         (default_title, quality_type['quality']['label']))
                results = fireEvent('provider.yarr.search',
                                    movie,
                                    quality_type['quality'],
                                    merge = True)
                sorted_results = sorted(results,
                                        key = lambda k: k['score'],
                                        reverse = True)

                # Register every result as a Release row so it shows in the UI.
                for nzb in sorted_results:
                    db = get_session()

                    rls = db.query(Release).filter_by(
                        identifier = md5(nzb['url'])).first()
                    if not rls:
                        rls = Release(identifier = md5(nzb['url']),
                                      movie_id = movie.get('id'),
                                      quality_id = quality_type.get('quality_id'),
                                      status_id = available_status.get('id'))
                        db.add(rls)
                        db.commit()

                        for info in nzb:
                            try:
                                # Only simple scalar values fit in ReleaseInfo.
                                if not isinstance(nzb[info],
                                                  (str, unicode, int, long)):
                                    continue

                                rls_info = ReleaseInfo(identifier = info,
                                                       value = nzb[info])
                                rls.info.append(rls_info)
                                db.commit()
                            except InterfaceError:
                                log.debug('Couldn\'t add %s to ReleaseInfo: %s',
                                          (info, traceback.format_exc()))

                # Snatch the top-scored result.
                for nzb in sorted_results:
                    return self.download(data = nzb, movie = movie)
            else:
                log.info(
                    'Better quality (%s) already available or snatched for %s',
                    (quality_type['quality']['label'], default_title))
                break

            # Break if CP wants to shut down
            if self.shuttingDown():
                break

        return False
Example #58
0
    def getHTMLData(self, url, **kwargs):
        """Return cached HTML for `url`, fetching and caching it on a miss."""
        return self.getCache(md5(url), url, **kwargs)
Example #59
0
 def setCache(self, cache_key, value, timeout=300):
     """Store `value` in the shared cache under the md5 of `cache_key`.

     Returns `value` so the call can be chained by the caller.
     """
     hashed_key = md5(cache_key)
     log.debug('Setting cache %s', cache_key)
     Env.get('cache').set(hashed_key, value, timeout)
     return value
Example #60
0
 def getUrl(self, url):
     """Fetch `url` through the cache, keyed on the md5 of the simplified URL."""
     simplified = simplifyString(url)
     return self.getCache(md5(simplified), url = url)