Example #1
0
    def containsTagScore(self, quality, words, cur_file = ''):
        """Score how well `words` (tokens from a release name) and `cur_file`
        match this `quality` dict's identifier/label/alternative/tags.
        Returns an int; higher means a stronger match.
        """
        cur_file = ss(cur_file)
        score = 0

        # Weight per tag type: identifier/label beat alternatives/tags.
        points = {
            'identifier': 10,
            'label': 10,
            'alternative': 9,
            'tags': 9
        }

        # Check alt and tags
        for tag_type in ['identifier', 'alternative', 'tags', 'label']:
            qualities = quality.get(tag_type, [])
            # A single string is treated as a one-element list.
            qualities = [qualities] if isinstance(qualities, (str, unicode)) else qualities

            for alt in qualities:
                # Tuple alternatives require all their words present for full
                # points; a partial overlap scores a third (py2 int division).
                if (isinstance(alt, tuple)):
                    if len(set(words) & set(alt)) == len(alt):
                        log.debug('Found %s via %s %s in %s', (quality['identifier'], tag_type, quality.get(tag_type), cur_file))
                        score += points.get(tag_type)
                    elif len(set(words) & set(alt)) > 0:
                        log.debug('Found %s via partial %s %s in %s', (quality['identifier'], tag_type, quality.get(tag_type), cur_file))
                        score += points.get(tag_type) / 3

                # String alternatives match as a substring of the file name.
                if (isinstance(alt, (str, unicode)) and ss(alt.lower()) in cur_file.lower()):
                    log.debug('Found %s via %s %s in %s', (quality['identifier'], tag_type, quality.get(tag_type), cur_file))
                    score += points.get(tag_type)

            # Direct word/tag overlap also scores full points.
            if list(set(qualities) & set(words)):
                log.debug('Found %s via %s %s in %s', (quality['identifier'], tag_type, quality.get(tag_type), cur_file))
                score += points.get(tag_type)

        return score
Example #2
0
    def safeMessage(self, msg, replace_tuple = ()):
        """Interpolate `replace_tuple` into `msg` with byte-safe encoding,
        mask private query parameters and the api key outside develop mode,
        and return the result as unicode.
        """

        from couchpotato.core.helpers.encoding import ss, toUnicode

        msg = ss(msg)

        try:
            # Encode every replacement to a byte-string; numbers pass through.
            if isinstance(replace_tuple, tuple):
                msg = msg % tuple([ss(x) if not isinstance(x, (int, float)) else x for x in list(replace_tuple)])
            elif isinstance(replace_tuple, dict):
                msg = msg % dict((k, ss(v)) for k, v in replace_tuple.iteritems())
            else:
                msg = msg % ss(replace_tuple)
        except Exception as e:
            self.logger.error('Failed encoding stuff to log "%s": %s' % (msg, e))

        self.setup()
        if not self.is_develop:

            # Mask values of private url parameters (?x=... / &x=...).
            for replace in self.replace_private:
                msg = re.sub('(\?%s=)[^\&]+' % replace, '?%s=xxx' % replace, msg)
                msg = re.sub('(&%s=)[^\&]+' % replace, '&%s=xxx' % replace, msg)

            # Replace api key
            try:
                api_key = self.Env.setting('api_key')
                if api_key:
                    msg = msg.replace(api_key, 'API_KEY')
            except:
                pass

        return toUnicode(msg)
Example #3
0
    def safeMessage(self, msg, replace_tuple = ()):
        """Interpolate `replace_tuple` into `msg`, retrying with byte-safe
        ss() encoding when plain interpolation fails, then mask private
        query parameters and the api key outside dev mode.
        """

        from couchpotato.environment import Env
        from couchpotato.core.helpers.encoding import ss

        msg = ss(msg)

        try:
            msg = msg % replace_tuple
        except:
            # Interpolation failed (usually an encoding problem); retry with
            # every replacement forced through ss().
            try:
                if isinstance(replace_tuple, tuple):
                    msg = msg % tuple([ss(x) for x in list(replace_tuple)])
                else:
                    msg = msg % ss(replace_tuple)
            except:
                self.logger.error(u'Failed encoding stuff to log: %s' % traceback.format_exc())

        if not Env.get('dev'):

            # Mask values of private url parameters (?x=... / &x=...).
            for replace in self.replace_private:
                msg = re.sub('(\?%s=)[^\&]+' % replace, '?%s=xxx' % replace, msg)
                msg = re.sub('(&%s=)[^\&]+' % replace, '&%s=xxx' % replace, msg)

            # Replace api key
            try:
                api_key = Env.setting('api_key')
                if api_key:
                    msg = msg.replace(api_key, 'API_KEY')
            except:
                pass

        return msg
Example #4
0
    def getAllDownloadStatus(self):
        """Fetch queue and history from SABnzbd and map each item onto the
        common release-download dict format. Returns False on API errors.
        """

        log.debug('Checking SABnzbd download status.')

        # Go through Queue
        try:
            queue = self.call({
                'mode': 'queue',
            })
        except:
            log.error('Failed getting queue: %s', traceback.format_exc(1))
            return False

        # Go through history items
        try:
            history = self.call({
                'mode': 'history',
                'limit': 15,
            })
        except:
            log.error('Failed getting history json: %s', traceback.format_exc(1))
            return False

        release_downloads = ReleaseDownloadList(self)

        # Get busy releases
        for nzb in queue.get('slots', []):
            status = 'busy'
            # SABnzbd flags password-protected downloads in the file name.
            if 'ENCRYPTED / ' in nzb['filename']:
                status = 'failed'

            release_downloads.append({
                'id': nzb['nzo_id'],
                'name': nzb['filename'],
                'status': status,
                'original_status': nzb['status'],
                # No meaningful ETA while the whole queue is paused.
                'timeleft': nzb['timeleft'] if not queue['paused'] else -1,
            })

        # Get old releases
        for nzb in history.get('slots', []):

            status = 'busy'
            # A non-empty fail_message means post-processing failed even
            # when SABnzbd reports the item as Completed.
            if nzb['status'] == 'Failed' or (nzb['status'] == 'Completed' and nzb['fail_message'].strip()):
                status = 'failed'
            elif nzb['status'] == 'Completed':
                status = 'completed'

            release_downloads.append({
                'id': nzb['nzo_id'],
                'name': nzb['name'],
                'status': status,
                'original_status': nzb['status'],
                'timeleft': str(timedelta(seconds = 0)),
                # When storage points at a file, report its parent folder.
                'folder': os.path.dirname(ss(nzb['storage'])) if os.path.isfile(ss(nzb['storage'])) else ss(nzb['storage']),
            })

        return release_downloads
Example #5
0
    def getAllDownloadStatus(self):
        """Fetch all torrents from Deluge and map them onto the common
        release-download dict format. Returns False when not connected or
        when the queue is empty/unavailable.
        """

        log.debug('Checking Deluge download status.')

        if not self.connect():
            return False

        release_downloads = ReleaseDownloadList(self)

        queue = self.drpc.get_alltorrents()

        if not queue:
            log.debug('Nothing in queue or error')
            return False

        for torrent_id in queue:
            torrent = queue[torrent_id]
            # NOTE(review): the id=%s placeholder is fed torrent['hash'] and
            # 'hash' appears twice in the value tuple — confirm intended.
            log.debug('name=%s / id=%s / save_path=%s / move_completed_path=%s / hash=%s / progress=%s / state=%s / eta=%s / ratio=%s / stop_ratio=%s / is_seed=%s / is_finished=%s / paused=%s', (torrent['name'], torrent['hash'], torrent['save_path'], torrent['move_completed_path'], torrent['hash'], torrent['progress'], torrent['state'], torrent['eta'], torrent['ratio'], torrent['stop_ratio'], torrent['is_seed'], torrent['is_finished'], torrent['paused']))

            # Deluge has no easy way to work out if a torrent is stalled or failing.
            #status = 'failed'
            status = 'busy'
            if torrent['is_seed'] and tryFloat(torrent['ratio']) < tryFloat(torrent['stop_ratio']):
                # We have torrent['seeding_time'] to work out what the seeding time is, but we do not
                # have access to the downloader seed_time, as with deluge we have no way to pass it
                # when the torrent is added. So Deluge will only look at the ratio.
                # See above comment in download().
                status = 'seeding'
            elif torrent['is_seed'] and torrent['is_finished'] and torrent['paused'] and torrent['state'] == 'Paused':
                status = 'completed'

            # Completed torrents may have been moved to a different folder.
            download_dir = torrent['save_path']
            if torrent['move_on_completed']:
                download_dir = torrent['move_completed_path']

            torrent_files = []
            for file_item in torrent['files']:
                torrent_files.append(os.path.join(download_dir, file_item['path']))

            release_downloads.append({
                'id': torrent['hash'],
                'name': torrent['name'],
                'status': status,
                'original_status': torrent['state'],
                'seed_ratio': torrent['ratio'],
                'timeleft': str(timedelta(seconds = torrent['eta'])),
                # Single-file torrents sit directly in download_dir; multi-file
                # ones in a subfolder named after the torrent.
                'folder': ss(download_dir) if len(torrent_files) == 1 else ss(os.path.join(download_dir, torrent['name'])),
                'files': ss('|'.join(torrent_files)),
            })

        return release_downloads
Example #6
0
File: base.py Project: Arcylus/PBI
    def getCache(self, cache_key, url = None, **kwargs):
        """Return the cached value for `cache_key`; on a miss, optionally
        fetch `url`, cache the response and return it.

        Returns None on a miss without a url; returns '' when the fetch
        fails (unless show_error is False, which re-raises instead).
        """
        cache_key = md5(ss(cache_key))
        cache = Env.get('cache').get(cache_key)
        if cache:
            if not Env.get('dev'): log.debug('Getting cache %s', cache_key)
            return cache

        if url:
            try:

                # Default cache lifetime; a cache_timeout kwarg overrides it
                # and must not be forwarded on to urlopen().
                cache_timeout = 300
                if kwargs.get('cache_timeout'):
                    cache_timeout = kwargs.get('cache_timeout')
                    del kwargs['cache_timeout']

                data = self.urlopen(url, **kwargs)

                if data:
                    self.setCache(cache_key, data, timeout = cache_timeout)
                return data
            except:
                if not kwargs.get('show_error', True):
                    raise

                return ''
Example #7
0
    def updateSuggestionCache(self, ignore_imdb = None, limit = 6, ignored = None):
        """Refresh the cached movie suggestion list.

        Drops `ignore_imdb` from the cached suggestions, tops the list back
        up via the 'movie.suggest' event when it falls below `limit`, and
        stores the result in the cache.

        :param ignore_imdb: imdb id to remove from the cached suggestions
        :param limit: minimum number of suggestions to keep cached
        :param ignored: list of imdb ids that must never be suggested
        :return: the new suggestion list
        """

        # Combine with previous suggestion_cache. A cache miss returns None,
        # so fall back to an empty list before iterating (previously crashed).
        cached_suggestion = self.getCache('suggestion_cached') or []
        new_suggestions = []
        # Guard against ignored = None: list(set(None)) below raised TypeError.
        ignored = ignored or []

        if ignore_imdb:
            for cs in cached_suggestion:
                if cs.get('imdb') != ignore_imdb:
                    new_suggestions.append(cs)

        # Get new results and add them
        if len(new_suggestions) - 1 < limit:

            db = get_session()
            active_movies = db.query(Movie) \
                .filter(or_(*[Movie.status.has(identifier = s) for s in ['active', 'done']])).all()
            movies = [x.library.identifier for x in active_movies]

            # Also skip what is already in the refreshed suggestion list.
            if ignored:
                ignored.extend([x.get('imdb') for x in new_suggestions])

            suggestions = fireEvent('movie.suggest', movies = movies, ignore = list(set(ignored)), single = True)

            if suggestions:
                new_suggestions.extend(suggestions)

        # Cache for 10 weeks.
        self.setCache(md5(ss('suggestion_cached')), new_suggestions, timeout = 6048000)

        return new_suggestions
Example #8
0
    def suggestView(self, **kwargs):
        """API view returning movie suggestions.

        Uses the cached suggestion list when present; otherwise fires the
        'movie.suggest' event — seeded with the user's active/done movies and
        the configured ignore list — and caches the outcome.

        :param kwargs: optional 'movies', 'ignored' (comma strings), 'limit'
        :return: dict with success flag, count and the suggestion slice
        """

        movies = splitString(kwargs.get('movies', ''))
        ignored = splitString(kwargs.get('ignored', ''))
        limit = kwargs.get('limit', 6)

        # Default to the user's own active/done movies as the seed list.
        if not movies:
            db = get_session()
            active_movies = db.query(Movie) \
                .filter(or_(*[Movie.status.has(identifier = s) for s in ['active', 'done']])).all()
            movies = [x.library.identifier for x in active_movies]

        if not ignored:
            ignored = splitString(Env.prop('suggest_ignore', default = ''))

        cached_suggestion = self.getCache('suggestion_cached')
        if cached_suggestion:
            suggestions = cached_suggestion
        else:
            # fireEvent can return None; normalize so len()/slicing are safe
            # (previously crashed on an empty event result).
            suggestions = fireEvent('movie.suggest', movies = movies, ignore = ignored, single = True) or []
            self.setCache(md5(ss('suggestion_cached')), suggestions, timeout = 6048000) # Cache for 10 weeks

        return {
            'success': True,
            'count': len(suggestions),
            'suggestions': suggestions[:limit]
        }
Example #9
0
    def validate(self, name = None):
        """Check `name` against the provider's validate endpoint.

        Returns the parsed JSON response, or None when no name was given.
        """
        if name:
            # The endpoint expects the name base64-encoded in the url.
            encoded = base64.b64encode(ss(name))
            url = self.urls['validate'] % encoded
            return self.getJsonData(url, headers = self.getRequestHeaders())
        return None
Example #10
0
    def getAllDownloadStatus(self):
        """Fetch all torrents from rTorrent and map them onto the common
        status-list format. Returns False when not connected or on errors.
        """
        log.debug('Checking rTorrent download status.')

        if not self.connect():
            return False

        try:
            torrents = self.rt.get_torrents()

            statuses = StatusList(self)

            for item in torrents:
                # Complete and still active means the torrent is seeding.
                status = 'busy'
                if item.complete:
                    if item.active:
                        status = 'seeding'
                    else:
                        status = 'completed'

                statuses.append({
                    'id': item.info_hash,
                    'name': item.name,
                    'status': status,
                    'seed_ratio': item.ratio,
                    'original_status': item.state,
                    # ETA = remaining bytes / download rate; -1 when idle.
                    'timeleft': str(timedelta(seconds = float(item.left_bytes) / item.down_rate)) if item.down_rate > 0 else -1,
                    'folder': ss(item.directory)
                })

            return statuses

        except Exception, err:
            log.error('Failed to get status from rTorrent: %s', err)
            return False
Example #11
0
    def download(self, data = {}, movie = {}, filedata = None):
        """Send an NZB to NZBGet over XML-RPC.

        NOTE(review): the visible code only verifies connectivity via
        writelog() and returns False on connection errors; no upload happens
        here, so the success path falls through — confirm against the full
        source.
        """

        if not filedata:
            log.error('Unable to get NZB file: %s', traceback.format_exc())
            return False

        log.info('Sending "%s" to NZBGet.', data.get('name'))

        url = self.url % {'host': self.conf('host'), 'username': self.conf('username'), 'password': self.conf('password')}
        nzb_name = ss('%s.nzb' % self.createNzbName(data, movie))

        rpc = xmlrpclib.ServerProxy(url)
        try:
            # writelog doubles as a connectivity/auth check.
            if rpc.writelog('INFO', 'CouchPotato connected to drop off %s.' % nzb_name):
                log.info('Successfully connected to NZBGet')
            else:
                log.info('Successfully connected to NZBGet, but unable to send a message')
        except socket.error:
            log.error('NZBGet is not responding. Please ensure that NZBGet is running and host setting is correct.')
            return False
        except xmlrpclib.ProtocolError, e:
            if e.errcode == 401:
                log.error('Password is incorrect.')
            else:
                log.error('Protocol Error: %s', e)
            return False
Example #12
0
    def deletePyc(self, only_excess=True):
        """Walk the app dir removing .pyc files, then remove any directories
        left empty.

        :param only_excess: when True, only delete .pyc files whose matching
            .py source no longer exists; when False, delete every .pyc.
        """

        for root, dirs, files in os.walk(ss(Env.get("app_dir"))):

            pyc_files = filter(lambda filename: filename.endswith(".pyc"), files)
            py_files = set(filter(lambda filename: filename.endswith(".py"), files))
            # A .pyc is "excess" when stripping the trailing 'c' yields a .py
            # name that is no longer present.
            excess_pyc_files = (
                filter(lambda pyc_filename: pyc_filename[:-1] not in py_files, pyc_files) if only_excess else pyc_files
            )

            for excess_pyc_file in excess_pyc_files:
                full_path = os.path.join(root, excess_pyc_file)
                log.debug("Removing old PYC file: %s", full_path)
                try:
                    os.remove(full_path)
                except:
                    log.error("Couldn't remove %s: %s", (full_path, traceback.format_exc()))

            # Clean up directories that ended up empty.
            for dir_name in dirs:
                full_path = os.path.join(root, dir_name)
                if len(os.listdir(full_path)) == 0:
                    try:
                        os.rmdir(full_path)
                    except:
                        log.error("Couldn't remove empty directory %s: %s", (full_path, traceback.format_exc()))
Example #13
0
    def getFromURL(self, url):
        """Scrape IMDB ids from an IMDB list page at `url`.

        Tries the 'list compact' page layout first, then falls back to
        parsing the 'main' div with BeautifulSoup. Returns a list of ids
        (empty when parsing fails).
        """
        log.debug('Getting IMDBs from: %s', url)
        html = self.getHTMLData(url)

        try:
            # Old layout: content sits between the list div and the pager.
            split = splitString(html, split_on = "<div class=\"list compact\">")[1]
            html = splitString(split, split_on = "<div class=\"pages\">")[0]
        except:
            try:
                split = splitString(html, split_on = "<div id=\"main\">")

                if len(split) < 2:
                    log.error('Failed parsing IMDB page "%s", unexpected html.', url)
                    return []

                html = BeautifulSoup(split[1])
                # Try the known list container classes until one matches.
                for x in ['list compact', 'lister', 'list detail sub-list']:
                    html2 = html.find('div', attrs = {
                        'class': x
                    })

                    if html2:
                        html = html2.contents
                        html = ''.join([str(x) for x in html])
                        break
            except:
                log.error('Failed parsing IMDB page "%s": %s', (url, traceback.format_exc()))

        html = ss(html)
        imdbs = getImdb(html, multiple = True) if html else []

        return imdbs
Example #14
0
    def download(self, data={}, movie={}, filedata=None):
        """Send an NZB to NZBGet over XML-RPC.

        NOTE(review): the visible code only verifies connectivity via
        writelog() and returns False on connection errors; no upload happens
        here, so the success path falls through — confirm against the full
        source.
        """

        if not filedata:
            log.error("Unable to get NZB file: %s", traceback.format_exc())
            return False

        log.info('Sending "%s" to NZBGet.', data.get("name"))

        url = self.url % {
            "host": self.conf("host"),
            "username": self.conf("username"),
            "password": self.conf("password"),
        }
        nzb_name = ss("%s.nzb" % self.createNzbName(data, movie))

        rpc = xmlrpclib.ServerProxy(url)
        try:
            # writelog doubles as a connectivity/auth check.
            if rpc.writelog("INFO", "CouchPotato connected to drop off %s." % nzb_name):
                log.info("Successfully connected to NZBGet")
            else:
                log.info("Successfully connected to NZBGet, but unable to send a message")
        except socket.error:
            log.error("NZBGet is not responding. Please ensure that NZBGet is running and host setting is correct.")
            return False
        except xmlrpclib.ProtocolError, e:
            if e.errcode == 401:
                log.error("Password is incorrect.")
            else:
                log.error("Protocol Error: %s", e)
            return False
Example #15
0
    def getAllDownloadStatus(self):
        """Collect the current state of every NZB known to the downloader.

        Queries the downloader's "nzb" endpoint and converts each entry to
        the common release-download dict format.
        """
        raw_statuses = self.call("nzb")

        release_downloads = ReleaseDownloadList(self)
        for nzb in raw_statuses.get("nzbs", []):

            # Map the downloader's state codes onto our generic statuses:
            # 20 means completed, 21/22/24 mean failed, anything else busy.
            state = nzb["state"]
            if state == 20:
                status = "completed"
            elif state in (21, 22, 24):
                status = "failed"
            else:
                status = "busy"

            release_downloads.append({
                "id": nzb["id"],
                "name": nzb["uiTitle"],
                "status": status,
                "original_status": state,
                "timeleft": -1,
                "folder": ss(nzb["destinationPath"]),
            })

        return release_downloads
    def moveFile(self, old, dest, forcemove = False):
        """Move/link `old` to `dest` according to the configured file_action,
        then apply file permissions. Unexpected OS errors are re-raised.
        """
        dest = ss(dest)
        try:
            if forcemove:
                shutil.move(old, dest)
            elif self.conf('file_action') == 'hardlink':
                link(old, dest)
            elif self.conf('file_action') == 'symlink':
                symlink(old, dest)
            elif self.conf('file_action') == 'copy':
                shutil.copy(old, dest)
            elif self.conf('file_action') == 'move_symlink':
                # Move the file, then leave a symlink at the old location.
                shutil.move(old, dest)
                symlink(dest, old)
            else:
                shutil.move(old, dest)

            try:
                os.chmod(dest, Env.getPermission('file'))
                # Reset inherited NTFS ACLs on Windows when configured.
                if os.name == 'nt' and self.conf('ntfs_permission'):
                    os.popen('icacls "' + dest + '"* /reset /T')
            except:
                log.error('Failed setting permissions for file: %s, %s', (dest, traceback.format_exc(1)))

        except OSError, err:
            # Copying from a filesystem with octal permission to an NTFS file system causes a permission error.  In this case ignore it.
            if not hasattr(os, 'chmod') or err.errno != errno.EPERM:
                raise
            else:
                if os.path.exists(dest):
                    os.unlink(old)
Example #17
0
    def _minify(self, file_type, files, position, out):
        """Concatenate and minify `files` ('script' uses jsmin, anything
        else is treated as css via cssprefixer), write the result into the
        cache dir, and register the minified url under
        self.minified[file_type][position].
        """

        cache = Env.get('cache_dir')
        out_name = 'minified_' + out
        out = os.path.join(cache, out_name)

        raw = []
        for file_path in files:
            # Read via a context manager so the handle is closed promptly
            # (the previous open(...).read() leaked the file handle).
            with open(file_path, 'r') as source:
                f = source.read()

            if file_type == 'script':
                data = jsmin(f)
            else:
                data = cssprefixer.process(f, debug = False, minify = True)
                # Rewrite relative asset urls to the served static paths.
                data = data.replace('../images/', '../static/images/')
                data = data.replace('../fonts/', '../static/fonts/')
                data = data.replace('../../static/', '../static/') # Replace inside plugins

            raw.append({'file': file_path, 'date': int(os.path.getmtime(file_path)), 'data': data})

        # Combine all files together with some comments
        data = ''
        for r in raw:
            data += self.comment.get(file_type) % (r.get('file'), r.get('date'))
            data += r.get('data') + '\n\n'

        self.createFile(out, ss(data.strip()))

        if not self.minified.get(file_type):
            self.minified[file_type] = {}
        if not self.minified[file_type].get(position):
            self.minified[file_type][position] = []

        # The mtime query-string busts browser caches on updates.
        minified_url = 'api/%s/file.cache/%s?%s' % (Env.setting('api_key'), out_name, tryInt(os.path.getmtime(out)))
        self.minified[file_type][position].append(minified_url)
Example #18
0
    def getFromURL(self, url):
        """Scrape IMDB ids from an IMDB list page at `url`.

        Tries the 'list compact' page layout first, then falls back to
        parsing the 'main' div with BeautifulSoup. Returns a list of ids
        (empty when parsing fails).
        """
        log.debug("Getting IMDBs from: %s", url)
        html = self.getHTMLData(url)

        try:
            # Old layout: content sits between the list div and the pager.
            split = splitString(html, split_on='<div class="list compact">')[1]
            html = splitString(split, split_on='<div class="pages">')[0]
        except:
            try:
                split = splitString(html, split_on='<div id="main">')

                if len(split) < 2:
                    log.error('Failed parsing IMDB page "%s", unexpected html.', url)
                    return []

                html = BeautifulSoup(split[1])
                # Try the known list container classes until one matches.
                for x in ["list compact", "lister", "list detail sub-list"]:
                    html2 = html.find("div", attrs={"class": x})

                    if html2:
                        html = html2.contents
                        html = "".join([str(x) for x in html])
                        break
            except:
                log.error('Failed parsing IMDB page "%s": %s', (url, traceback.format_exc()))

        html = ss(html)
        imdbs = getImdb(html, multiple=True) if html else []

        return imdbs
Example #19
0
    def replaceWith(self, path):
        """Overwrite the app dir with the updated files found under `path`.

        NOTE(review): existing_files is collected and pruned but never used
        afterwards in the visible code — leftover cleanup presumably follows;
        confirm against the full source.
        """
        app_dir = ss(Env.get('app_dir'))

        # Get list of files we want to overwrite
        self.deletePyc(only_excess = False)
        existing_files = []
        for root, subfiles, filenames in os.walk(app_dir):
            for filename in filenames:
                existing_files.append(os.path.join(root, filename))

        for root, subfiles, filenames in os.walk(path):
            for filename in filenames:
                fromfile = os.path.join(root, filename)
                # Destination keeps the path relative to the update root.
                tofile = os.path.join(app_dir, fromfile.replace(path + os.path.sep, ''))

                if not Env.get('dev'):
                    try:
                        os.remove(tofile)
                    except:
                        pass

                    try:
                        os.renames(fromfile, tofile)
                        try:
                            existing_files.remove(tofile)
                        except ValueError:
                            pass
                    except Exception, e:
                        log.error('Failed overwriting file: %s', e)
Example #20
0
    def safeMessage(self, msg, replace_tuple=()):
        """Interpolate `replace_tuple` into `msg`, retrying with byte-safe
        ss() encoding when plain interpolation fails.

        NOTE(review): unlike the other safeMessage variants, no return or
        private-data masking is present in the visible code — confirm
        against the full source.
        """

        from couchpotato.environment import Env
        from couchpotato.core.helpers.encoding import ss

        msg = ss(msg)

        try:
            msg = msg % replace_tuple
        except:
            # Interpolation failed (usually an encoding problem); retry with
            # every replacement forced through ss().
            try:
                if isinstance(replace_tuple, tuple):
                    msg = msg % tuple([ss(x) for x in list(replace_tuple)])
                else:
                    msg = msg % ss(replace_tuple)
            except Exception, e:
                self.logger.error(u'Failed encoding stuff to log "%s": %s' % (msg, e))
Example #21
0
 def makeDir(self, path):
     """Create `path` (and parents) with the configured folder permission.
     Returns True when the folder exists or was created; logs and returns
     None on failure.
     """
     path = ss(path)
     try:
         if not os.path.isdir(path):
             os.makedirs(path, Env.getPermission("folder"))
         return True
     except Exception, e:
         log.error('Unable to create folder "%s": %s', (path, e))
Example #22
0
    def getAllDownloadStatus(self):
        """Fetch all torrents from Transmission and map them onto the common
        release-download dict format. Returns False when not connected or
        when the queue is empty/unavailable.
        """

        log.debug('Checking Transmission download status.')

        if not self.connect():
            return False

        release_downloads = ReleaseDownloadList(self)

        # Only request the fields we actually use below.
        return_params = {
            'fields': ['id', 'name', 'hashString', 'percentDone', 'status', 'eta', 'isStalled', 'isFinished', 'downloadDir', 'uploadRatio', 'secondsSeeding', 'seedIdleLimit', 'files']
        }

        queue = self.trpc.get_alltorrents(return_params)
        if not (queue and queue.get('torrents')):
            log.debug('Nothing in queue or error')
            return False

        for torrent in queue['torrents']:
            log.debug('name=%s / id=%s / downloadDir=%s / hashString=%s / percentDone=%s / status=%s / eta=%s / uploadRatio=%s / isFinished=%s',
                (torrent['name'], torrent['id'], torrent['downloadDir'], torrent['hashString'], torrent['percentDone'], torrent['status'], torrent['eta'], torrent['uploadRatio'], torrent['isFinished']))

            torrent_files = []
            for file_item in torrent['files']:
                torrent_files.append(os.path.normpath(os.path.join(ss(torrent['downloadDir']), ss(file_item['name']))))

            # Status 0 with everything downloaded counts as completed;
            # 5/6 count as seeding; stalled may optionally count as failed.
            status = 'busy'
            if torrent['isStalled'] and self.conf('stalled_as_failed'):
                status = 'failed'
            elif torrent['status'] == 0 and torrent['percentDone'] == 1:
                status = 'completed'
            elif torrent['status'] in [5, 6]:
                status = 'seeding'

            release_downloads.append({
                'id': torrent['hashString'],
                'name': torrent['name'],
                'status': status,
                'original_status': torrent['status'],
                'seed_ratio': torrent['uploadRatio'],
                'timeleft': str(timedelta(seconds = torrent['eta'])),
                # Single-file torrents sit directly in downloadDir; multi-file
                # ones in a subfolder named after the torrent.
                'folder': os.path.normpath(ss(torrent['downloadDir'])) if len(torrent_files) == 1 else os.path.normpath(os.path.join(ss(torrent['downloadDir']), ss(torrent['name']))),
                'files': ss('|'.join(torrent_files))
            })

        return release_downloads
Example #23
0
    def getMeta(self, filename):
        """Extract video metadata from `filename` using enzyme.

        Returns a dict with candidate titles from the container headers,
        video/audio codec, resolution and audio channels; {} when parsing
        fails.
        """

        try:
            p = enzyme.parse(filename)

            # Video codec
            vc = ('H264' if p.video[0].codec == 'AVC1' else p.video[0].codec)

            # Audio codec
            ac = p.audio[0].codec
            # Map raw codec ids to friendly names where known.
            try: ac = self.audio_codec_map.get(p.audio[0].codec)
            except: pass

            # Find title in video headers
            titles = []

            # Only keep titles accepted by findYear() — presumably ones that
            # contain a year; verify against the helper.
            try:
                if p.title and self.findYear(p.title):
                    titles.append(ss(p.title))
            except:
                log.error('Failed getting title from meta: %s', traceback.format_exc())

            for video in p.video:
                try:
                    if video.title and self.findYear(video.title):
                        titles.append(ss(video.title))
                except:
                    log.error('Failed getting title from meta: %s', traceback.format_exc())

            return {
                'titles': list(set(titles)),
                'video': vc,
                'audio': ac,
                'resolution_width': tryInt(p.video[0].width),
                'resolution_height': tryInt(p.video[0].height),
                'audio_channels': p.audio[0].channels,
            }
        except enzyme.exceptions.ParseError:
            log.debug('Failed to parse meta for %s', filename)
        except enzyme.exceptions.NoParserError:
            log.debug('No parser found for %s', filename)
        except:
            log.debug('Failed parsing %s', filename)

        return {}
Example #24
0
    def replaceWith(self, path):
        """Replace the app dir contents with the updated files under `path`,
        then delete leftovers that were not part of the update (anything in
        the data dir is kept). Returns True on success, False on failure.
        """
        app_dir = ss(Env.get('app_dir'))
        data_dir = ss(Env.get('data_dir'))

        # Get list of files we want to overwrite
        self.deletePyc()
        existing_files = []
        for root, subfiles, filenames in os.walk(app_dir):
            for filename in filenames:
                existing_files.append(os.path.join(root, filename))

        for root, subfiles, filenames in os.walk(path):
            for filename in filenames:
                fromfile = os.path.join(root, filename)
                # Destination keeps the path relative to the update root.
                tofile = os.path.join(app_dir, fromfile.replace(path + os.path.sep, ''))

                if not Env.get('dev'):
                    try:
                        if os.path.isfile(tofile):
                            os.remove(tofile)

                        dirname = os.path.dirname(tofile)
                        if not os.path.isdir(dirname):
                            self.makeDir(dirname)

                        shutil.move(fromfile, tofile)
                        try:
                            existing_files.remove(tofile)
                        except ValueError:
                            pass
                    except:
                        log.error('Failed overwriting file "%s": %s', (tofile, traceback.format_exc()))
                        return False

        # Anything still listed was not part of the update; remove it.
        for still_exists in existing_files:

            if data_dir in still_exists:
                continue

            try:
                os.remove(still_exists)
            except:
                log.error('Failed removing non-used file: %s', traceback.format_exc())

        return True
Example #25
0
    def containsTagScore(self, quality, words, cur_file=""):
        """Score how well `words` (tokens from a release name) and `cur_file`
        match this `quality`'s identifier/label/alternative/tags/ext.
        Returns an int; higher means a stronger match.
        """
        cur_file = ss(cur_file)
        score = 0

        # Weight per tag type; extension matches count least.
        points = {"identifier": 10, "label": 10, "alternative": 9, "tags": 9, "ext": 3}

        # Check alt and tags
        for tag_type in ["identifier", "alternative", "tags", "label"]:
            qualities = quality.get(tag_type, [])
            # A single string is treated as a one-element list.
            qualities = [qualities] if isinstance(qualities, (str, unicode)) else qualities

            for alt in qualities:
                # Tuple alternatives require all their words present for full
                # points; a partial overlap scores a third (py2 int division).
                if isinstance(alt, tuple):
                    if len(set(words) & set(alt)) == len(alt):
                        log.debug(
                            "Found %s via %s %s in %s",
                            (quality["identifier"], tag_type, quality.get(tag_type), cur_file),
                        )
                        score += points.get(tag_type)
                    elif len(set(words) & set(alt)) > 0:
                        log.debug(
                            "Found %s via partial %s %s in %s",
                            (quality["identifier"], tag_type, quality.get(tag_type), cur_file),
                        )
                        score += points.get(tag_type) / 3

                # String alternatives match as a substring of the file name,
                # worth half points.
                if isinstance(alt, (str, unicode)) and ss(alt.lower()) in cur_file.lower():
                    log.debug(
                        "Found %s via %s %s in %s", (quality["identifier"], tag_type, quality.get(tag_type), cur_file)
                    )
                    score += points.get(tag_type) / 2

            # Direct word/tag overlap scores full points.
            if list(set(qualities) & set(words)):
                log.debug(
                    "Found %s via %s %s in %s", (quality["identifier"], tag_type, quality.get(tag_type), cur_file)
                )
                score += points.get(tag_type)

        # Check extention
        # Guard against an empty word list before peeking at words[-1]
        # (previously raised IndexError on empty input).
        if words:
            for ext in quality.get("ext", []):
                if ext == words[-1]:
                    log.debug("Found %s extension in %s", (ext, cur_file))
                    score += points["ext"]

        return score
Example #26
0
    def getImage(self, movie, type="poster", size="poster"):
        """Return the URL for one of the movie's images, or "" on failure."""
        try:
            # The movie object exposes each image kind as an attribute.
            return getattr(movie, type).geturl(size=size)
        except:
            log.debug('Failed getting %s.%s for "%s"', (type, size, ss(str(movie))))
            return ""
    def getImage(self, movie, type = 'poster', size = 'poster'):
        # Return an image URL for the movie, or '' when lookup fails.
        # NOTE(review): `size` is only used in the log message — geturl() is
        # always called with size = 'original'; confirm this is intentional.

        image_url = ''
        try:
            image_url = getattr(movie, type).geturl(size = 'original')
        except:
            log.debug('Failed getting %s.%s for "%s"', (type, size, ss(str(movie))))

        return image_url
Example #28
0
def isSubFolder(sub_folder, base_folder):
    """Return True if sub_folder is the same as, or located inside, base_folder.

    Falsy arguments yield False. Both paths are normalized and given a single
    trailing separator before comparing, so '/movies/abc' is not considered
    inside '/movies/ab'.

    Bug fix: the original used substring containment (`in`), which reported
    True when the base path merely appeared in the middle of an unrelated
    path (e.g. base '/a/b' vs sub '/x/a/b/c'); use startswith instead.
    The unreachable code that followed the original return has been removed.
    """
    if not (base_folder and sub_folder):
        return False

    base = ss(os.path.normpath(base_folder).rstrip(os.path.sep) + os.path.sep)
    sub = ss(os.path.normpath(sub_folder).rstrip(os.path.sep) + os.path.sep)
    return sub.startswith(base)
Example #29
0
    def getImage(self, movie, type = 'poster', size = 'poster'):
        """Build the TMDB image URL of the given *type* and *size* for *movie*.

        Joins the configured secure base URL, the size segment and the movie's
        '<type>_path' value. Returns '' when the configuration or movie data
        is missing pieces.
        """
        try:
            base_url = self.configuration['images']['secure_base_url']
            return '%s%s%s' % (base_url, size, movie.get('%s_path' % type))
        except:
            log.debug('Failed getting %s.%s for "%s"', (type, size, ss(str(movie))))
            return ''
Example #30
0
    def getMultImages(self, movie, type = 'backdrops', size = 'original'):
        """Collect up to four image URLs of *type* for *movie*.

        Skips the first image (used elsewhere as the primary one) and takes at
        most the next four, resolving each via getImage. On error the URLs
        gathered so far are returned.
        """
        urls = []
        try:
            # [1:5] -> skip the primary image, cap the extras at four
            for img in movie.get('images', {}).get(type, [])[1:5]:
                urls.append(self.getImage(img, 'file', size))
        except:
            log.debug('Failed getting %s.%s for "%s"', (type, size, ss(str(movie))))

        return urls
Example #31
0
    def download(self, data = None, media = None, filedata = None):
        """ Send an nzb file to NZBGet via its XML-RPC interface

        :param data: dict returned from provider
            Contains the release information
        :param media: media dict with information
            Used for creating the filename when possible
        :param filedata: downloaded nzb filedata
            The file gets downloaded in the searcher and sent to this function
            This is done to have failed checking before using the downloader, so the downloader
            doesn't need to worry about that
        :return: boolean
            A failure returns False; the downloader should log its own errors
        """

        if not media: media = {}
        if not data: data = {}

        # NZBGet is fed the nzb contents directly (base64 in append below),
        # so filedata is mandatory — there is no add-by-url path here
        if not filedata:
            log.error('Unable to get NZB file: %s', traceback.format_exc())
            return False

        log.info('Sending "%s" to NZBGet.', data.get('name'))

        nzb_name = ss('%s.nzb' % self.createNzbName(data, media))

        rpc = self.getRPC()

        # Probe connectivity/credentials with a harmless log write before queueing
        try:
            if rpc.writelog('INFO', 'CouchPotato connected to drop off %s.' % nzb_name):
                log.debug('Successfully connected to NZBGet')
            else:
                log.info('Successfully connected to NZBGet, but unable to send a message')
        except socket.error:
            log.error('NZBGet is not responding. Please ensure that NZBGet is running and host setting is correct.')
            return False
        except xmlrpclib.ProtocolError as e:
            if e.errcode == 401:
                log.error('Password is incorrect.')
            else:
                log.error('Protocol Error: %s', e)
            return False

        # 0.x versions are called without the priority argument; newer ones accept it
        if re.search(r"^0", rpc.version()):
            xml_response = rpc.append(nzb_name, self.conf('category'), False, standard_b64encode(filedata.strip()))
        else:
            xml_response = rpc.append(nzb_name, self.conf('category'), tryInt(self.conf('priority')), False, standard_b64encode(filedata.strip()))

        if xml_response:
            log.info('NZB sent successfully to NZBGet')
            nzb_id = md5(data['url'])  # about as unique as they come ;)
            # Tag the queued group with our own id so later status checks can match it
            couchpotato_id = "couchpotato=" + nzb_id
            groups = rpc.listgroups()
            file_id = [item['LastID'] for item in groups if item['NZBFilename'] == nzb_name]
            confirmed = rpc.editqueue("GroupSetParameter", 0, couchpotato_id, file_id)
            if confirmed:
                log.debug('couchpotato parameter set in nzbget download')
            return self.downloadReturnId(nzb_id)
        else:
            log.error('NZBGet could not add %s to the queue.', nzb_name)
            return False
Example #32
0
    def getAllDownloadStatus(self):
        """Fetch every torrent known to Deluge and translate each one into a
        ReleaseDownloadList entry (id, name, status, folder, files, ...).

        Returns False when the connection or the queue fetch fails.
        """

        log.debug('Checking Deluge download status.')

        if not self.connect():
            return False

        release_downloads = ReleaseDownloadList(self)

        queue = self.drpc.get_alltorrents()

        if not queue:
            log.debug('Nothing in queue or error')
            return False

        for torrent_id in queue:
            torrent = queue[torrent_id]
            log.debug(
                'name=%s / id=%s / save_path=%s / move_completed_path=%s / hash=%s / progress=%s / state=%s / eta=%s / ratio=%s / stop_ratio=%s / is_seed=%s / is_finished=%s / paused=%s',
                (torrent['name'], torrent['hash'], torrent['save_path'],
                 torrent['move_completed_path'], torrent['hash'],
                 torrent['progress'], torrent['state'], torrent['eta'],
                 torrent['ratio'], torrent['stop_ratio'], torrent['is_seed'],
                 torrent['is_finished'], torrent['paused']))

            # Deluge has no easy way to work out if a torrent is stalled or failing.
            #status = 'failed'
            status = 'busy'
            # Seeding but still below the configured stop ratio
            if torrent['is_seed'] and tryFloat(torrent['ratio']) < tryFloat(
                    torrent['stop_ratio']):
                # We have torrent['seeding_time'] to work out what the seeding time is, but we do not
                # have access to the downloader seed_time, as with deluge we have no way to pass it
                # when the torrent is added. So Deluge will only look at the ratio.
                # See above comment in download().
                status = 'seeding'
            # Finished and paused by Deluge itself -> treat as completed
            elif torrent['is_seed'] and torrent['is_finished'] and torrent[
                    'paused'] and torrent['state'] == 'Paused':
                status = 'completed'

            # Deluge may relocate the data after completion; report the final location
            download_dir = torrent['save_path']
            if torrent['move_on_completed']:
                download_dir = torrent['move_completed_path']

            torrent_files = []
            for file_item in torrent['files']:
                torrent_files.append(
                    os.path.join(download_dir, file_item['path']))

            release_downloads.append({
                'id':
                torrent['hash'],
                'name':
                torrent['name'],
                'status':
                status,
                'original_status':
                torrent['state'],
                'seed_ratio':
                torrent['ratio'],
                'timeleft':
                str(timedelta(seconds=torrent['eta'])),
                # Single-file torrents sit directly in download_dir; multi-file
                # torrents get a folder named after the torrent
                'folder':
                ss(download_dir) if len(torrent_files) == 1 else ss(
                    os.path.join(download_dir, torrent['name'])),
                'files':
                ss('|'.join(torrent_files)),
            })

        return release_downloads
Example #33
0
 def is_hidden(self, filepath):
     """Return True when *filepath* is hidden: its basename starts with a dot,
     or the OS reports a hidden attribute for it."""
     basename = ss(os.path.basename(os.path.abspath(filepath)))
     if basename.startswith('.'):
         return True
     return self.has_hidden_attribute(filepath)
Example #34
0
    def urlopen(self,
                url,
                timeout=30,
                params=None,
                headers=None,
                opener=None,
                multipart=False,
                show_error=True):
        """Open *url* (optionally as a multipart POST) and return the response
        body, transparently gunzipping it when the server compressed it.

        Consecutive failures are tracked per host; after more than 5 failures
        the host is disabled for 15 minutes, during which this returns ''
        (or raises when show_error is False).

        :raises: re-raises IOError from the underlying request
        """
        url = ss(url)

        if not headers: headers = {}
        if not params: params = {}

        # Fill in some headers
        headers['Referer'] = headers.get('Referer', urlparse(url).hostname)
        headers['Host'] = headers.get('Host', urlparse(url).hostname)
        headers['User-Agent'] = headers.get(
            'User-Agent',
            'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:10.0.2) Gecko/20100101 Firefox/10.0.2'
        )
        headers['Accept-encoding'] = headers.get('Accept-encoding', 'gzip')

        host = urlparse(url).hostname

        # Don't try for failed requests
        if self.http_failed_disabled.get(host, 0) > 0:
            # Still within the 15 minute (900s) cool-down window?
            if self.http_failed_disabled[host] > (time.time() - 900):
                log.info2(
                    'Disabled calls to %s for 15 minutes because so many failed requests.',
                    host)
                if not show_error:
                    # Bug fix: this was a bare `raise` with no active exception,
                    # which itself blows up; raise an explicit Exception instead
                    # (matches the behaviour of the newer urlopen variant)
                    raise Exception('Disabled calls to %s for 15 minutes because so many failed requests' % host)
                else:
                    return ''
            else:
                # Cool-down expired: forget the failure bookkeeping for this host
                del self.http_failed_request[host]
                del self.http_failed_disabled[host]

        # Rate-limit requests to this host
        self.wait(host)
        try:

            if multipart:
                log.info('Opening multipart url: %s, params: %s',
                         (url, [x for x in params.iterkeys()] if isinstance(
                             params, dict) else 'with data'))
                request = urllib2.Request(url, params, headers)

                if opener:
                    opener.add_handler(MultipartPostHandler())
                else:
                    cookies = cookielib.CookieJar()
                    opener = urllib2.build_opener(
                        urllib2.HTTPCookieProcessor(cookies),
                        MultipartPostHandler)

                response = opener.open(request, timeout=timeout)
            else:
                log.info('Opening url: %s, params: %s',
                         (url, [x for x in params.iterkeys()]))
                data = tryUrlencode(params) if len(params) > 0 else None
                request = urllib2.Request(url, data, headers)

                if opener:
                    response = opener.open(request, timeout=timeout)
                else:
                    response = urllib2.urlopen(request, timeout=timeout)

            # unzip if needed
            if response.info().get('Content-Encoding') == 'gzip':
                buf = StringIO(response.read())
                f = gzip.GzipFile(fileobj=buf)
                data = f.read()
            else:
                data = response.read()

            # Success: reset the consecutive-failure counter
            self.http_failed_request[host] = 0
        except IOError:
            if show_error:
                log.error('Failed opening url in %s: %s %s',
                          (self.getName(), url, traceback.format_exc(1)))

            # Save failed requests by hosts
            try:
                if not self.http_failed_request.get(host):
                    self.http_failed_request[host] = 1
                else:
                    self.http_failed_request[host] += 1

                    # Disable temporarily
                    if self.http_failed_request[host] > 5:
                        self.http_failed_disabled[host] = time.time()

            except:
                log.debug('Failed logging failed requests for %s: %s',
                          (url, traceback.format_exc()))

            raise

        self.http_last_use[host] = time.time()

        return data
Example #35
0
 def setCache(self, cache_key, value, timeout=300):
     """Store *value* in the global cache under the md5 of *cache_key* for
     *timeout* seconds, and hand the value back to the caller."""
     hashed_key = md5(ss(cache_key))
     log.debug('Setting cache %s', cache_key)
     Env.get('cache').set(hashed_key, value, timeout)
     return value
Example #36
0
def isSubFolder(sub_folder, base_folder):
    """Return True if sub_folder is the same as, or located inside, base_folder.

    Falsy arguments yield False. Paths are normalized and get a trailing
    separator so '/movies/abc' is not considered inside '/movies/ab'.

    Bug fix: the original used substring containment (`in`), which matched
    a base path appearing anywhere inside the sub path (e.g. base '/a/b'
    vs sub '/x/a/b/c'); a prefix test is the correct check.
    """
    if not (base_folder and sub_folder):
        return False

    base = ss(os.path.normpath(base_folder).rstrip(os.path.sep) + os.path.sep)
    sub = ss(os.path.normpath(sub_folder).rstrip(os.path.sep) + os.path.sep)
    return sub.startswith(base)
    def download(self, data = None, media = None, filedata = None):
        """
        Send a torrent/nzb file to the downloader

        :param data: dict returned from provider
            Contains the release information
        :param media: media dict with information
            Used for creating the filename when possible
        :param filedata: downloaded torrent/nzb filedata
            The file gets downloaded in the searcher and sent to this function
            This is done to have failed checking before using the downloader, so the downloader
            doesn't need to worry about that
        :return: boolean
            A failure returns False; the downloader should log its own errors
        """

        if not media: media = {}
        if not data: data = {}

        log.info('Sending "%s" to SABnzbd.', data.get('name'))

        req_params = {
            'cat': self.conf('category'),
            'mode': 'addurl',
            'nzbname': self.createNzbName(data, media),
            'priority': self.conf('priority'),
        }

        nzb_filename = None
        if filedata:
            # Sanity check: anything this small can't be a real nzb
            if len(filedata) < 50:
                log.error('No proper nzb available: %s', filedata)
                return False

            # If it's a .rar, it adds the .rar extension, otherwise it stays .nzb
            nzb_filename = self.createFileName(data, filedata, media)
            req_params['mode'] = 'addfile'
        else:
            req_params['name'] = data.get('url')

        try:
            # Bug fix: was `is 'addfile'` — identity comparison with a string
            # literal only worked via CPython interning; use equality
            if nzb_filename and req_params.get('mode') == 'addfile':
                sab_data = self.call(req_params, files = {'nzbfile': (ss(nzb_filename), filedata)})
            else:
                sab_data = self.call(req_params)
        except URLError:
            log.error('Failed sending release, probably wrong HOST: %s', traceback.format_exc(0))
            return False
        except:
            log.error('Failed sending release, use API key, NOT the NZB key: %s', traceback.format_exc(0))
            return False

        log.debug('Result from SAB: %s', sab_data)
        nzo_ids = sab_data.get('nzo_ids', [])
        if sab_data.get('status') and not sab_data.get('error') and isinstance(nzo_ids, list) and len(nzo_ids) > 0:
            log.info('NZB sent to SAB successfully.')
            if filedata:
                return self.downloadReturnId(nzo_ids[0])
            else:
                return True
        else:
            log.error('Error getting data from SABNZBd: %s', sab_data)
            return False
Example #38
0
    def getAllDownloadStatus(self):
        """Poll SABnzbd's queue and recent history and map every slot to a
        ReleaseDownloadList entry. Returns False when either API call fails."""

        log.debug('Checking SABnzbd download status.')

        # Go through Queue
        try:
            queue = self.call({'mode': 'queue'})
        except:
            log.error('Failed getting queue: %s', traceback.format_exc(1))
            return False

        # Go through history items
        try:
            history = self.call({'mode': 'history', 'limit': 15})
        except:
            log.error('Failed getting history json: %s', traceback.format_exc(1))
            return False

        release_downloads = ReleaseDownloadList(self)

        # Active queue slots: busy unless SAB flagged the item as encrypted
        for nzb in queue.get('slots', []):
            slot_status = 'failed' if 'ENCRYPTED / ' in nzb['filename'] else 'busy'

            release_downloads.append({
                'id': nzb['nzo_id'],
                'name': nzb['filename'],
                'status': slot_status,
                'original_status': nzb['status'],
                'timeleft': nzb['timeleft'] if not queue['paused'] else -1,
            })

        # History slots: map SAB's status (plus fail_message) onto ours
        for nzb in history.get('slots', []):

            if nzb['status'] == 'Failed' or (nzb['status'] == 'Completed'
                                             and nzb['fail_message'].strip()):
                slot_status = 'failed'
            elif nzb['status'] == 'Completed':
                slot_status = 'completed'
            else:
                slot_status = 'busy'

            storage = ss(nzb['storage'])
            release_downloads.append({
                'id': nzb['nzo_id'],
                'name': nzb['name'],
                'status': slot_status,
                'original_status': nzb['status'],
                'timeleft': str(timedelta(seconds=0)),
                # storage may point at the file itself; report its directory then
                'folder': os.path.dirname(storage) if os.path.isfile(storage) else storage,
            })

        return release_downloads
Example #39
0
 def add_torrent_file(self, filename, filedata):
     """Upload a torrent file to the client through the add-file API action."""
     return self._request("action=add-file",
                          {"torrent_file": (ss(filename), filedata)})
Example #40
0
    def urlopen(self,
                url,
                timeout=30,
                data=None,
                headers=None,
                files=None,
                show_error=True,
                stream=False):
        """Request *url* through the shared requests opener and return the
        response body (or the streaming response object when *stream* is True).

        Consecutive failures are tracked per host; after more than 5 failures
        on a non-local host it is disabled for 15 minutes, during which this
        returns '' (or raises when show_error is False). An HTTP 429 disables
        the host immediately.

        :raises: re-raises network errors (IOError, MaxRetryError, Timeout)
        """
        url = quote(ss(url), safe="%/:=&?~#+!$,;'@()*[]")

        if not headers: headers = {}
        if not data: data = {}

        # Fill in some headers
        parsed_url = urlparse(url)
        host = '%s%s' % (parsed_url.hostname,
                         (':' +
                          str(parsed_url.port) if parsed_url.port else ''))

        headers['Referer'] = headers.get('Referer',
                                         '%s://%s' % (parsed_url.scheme, host))
        # NOTE(review): Host defaults to None here, while an older variant of
        # this method defaulted it to the parsed host — confirm this is
        # intentional (requests can derive Host itself)
        headers['Host'] = headers.get('Host', None)
        headers['User-Agent'] = headers.get('User-Agent', self.user_agent)
        headers['Accept-encoding'] = headers.get('Accept-encoding', 'gzip')
        headers['Connection'] = headers.get('Connection', 'keep-alive')
        headers['Cache-Control'] = headers.get('Cache-Control', 'max-age=0')

        use_proxy = Env.setting('use_proxy')
        proxy_url = None

        if use_proxy:
            proxy_server = Env.setting('proxy_server')
            proxy_username = Env.setting('proxy_username')
            proxy_password = Env.setting('proxy_password')

            # Explicit proxy server (optionally with credentials), otherwise
            # fall back to the system-configured proxies
            if proxy_server:
                loc = "{0}:{1}@{2}".format(
                    proxy_username, proxy_password,
                    proxy_server) if proxy_username else proxy_server
                proxy_url = {
                    "http": "http://" + loc,
                    "https": "https://" + loc,
                }
            else:
                proxy_url = getproxies()

        r = Env.get('http_opener')

        # Don't try for failed requests
        if self.http_failed_disabled.get(host, 0) > 0:
            # Still within the 15 minute (900s) cool-down window?
            if self.http_failed_disabled[host] > (time.time() - 900):
                log.info2(
                    'Disabled calls to %s for 15 minutes because so many failed requests.',
                    host)
                if not show_error:
                    raise Exception(
                        'Disabled calls to %s for 15 minutes because so many failed requests'
                        % host)
                else:
                    return ''
            else:
                # Cool-down expired: forget the failure bookkeeping for this host
                del self.http_failed_request[host]
                del self.http_failed_disabled[host]

        # Rate-limit requests to this host
        self.wait(host, url)
        status_code = None
        try:

            kwargs = {
                'headers': headers,
                'data': data if len(data) > 0 else None,
                'timeout': timeout,
                'files': files,
                'verify':
                False,  #verify_ssl, Disable for now as to many wrongly implemented certificates..
                'stream': stream,
                'proxies': proxy_url,
            }
            method = 'post' if len(data) > 0 or files else 'get'

            log.info('Opening url: %s %s, data: %s',
                     (method, url, [x for x in data.keys()] if isinstance(
                         data, dict) else 'with data'))
            response = r.request(method, url, **kwargs)

            status_code = response.status_code
            if response.status_code == requests.codes.ok:
                data = response if stream else response.content
            else:
                response.raise_for_status()

            # Success: reset the consecutive-failure counter
            self.http_failed_request[host] = 0
        except (IOError, MaxRetryError, Timeout):
            if show_error:
                log.error('Failed opening url in %s: %s %s',
                          (self.getName(), url, traceback.format_exc(0)))

            # Save failed requests by hosts
            try:

                # To many requests
                if status_code in [429]:
                    self.http_failed_request[host] = 1
                    self.http_failed_disabled[host] = time.time()

                if not self.http_failed_request.get(host):
                    self.http_failed_request[host] = 1
                else:
                    self.http_failed_request[host] += 1

                    # Disable temporarily
                    if self.http_failed_request[host] > 5 and not isLocalIP(
                            host):
                        self.http_failed_disabled[host] = time.time()

            except:
                log.debug('Failed logging failed requests for %s: %s',
                          (url, traceback.format_exc()))

            raise

        self.http_last_use[host] = time.time()

        return data
Example #41
0
    def urlopen(self, url, timeout = 30, data = None, headers = None, files = None, show_error = True):
        """Request *url* through self.http_opener and return the response
        content.

        Consecutive failures are tracked per host; after more than 5 failures
        on a non-local host it is disabled for 15 minutes, during which this
        returns '' (or raises when show_error is False).

        :raises: re-raises network errors (IOError, MaxRetryError, Timeout)
        """
        url = urllib2.quote(ss(url), safe = "%/:=&?~#+!$,;'@()*[]")

        if not headers: headers = {}
        if not data: data = {}

        # Fill in some headers
        parsed_url = urlparse(url)
        host = '%s%s' % (parsed_url.hostname, (':' + str(parsed_url.port) if parsed_url.port else ''))

        headers['Referer'] = headers.get('Referer', '%s://%s' % (parsed_url.scheme, host))
        headers['Host'] = headers.get('Host', host)
        headers['User-Agent'] = headers.get('User-Agent', self.user_agent)
        headers['Accept-encoding'] = headers.get('Accept-encoding', 'gzip')
        headers['Connection'] = headers.get('Connection', 'keep-alive')
        headers['Cache-Control'] = headers.get('Cache-Control', 'max-age=0')

        r = self.http_opener

        # Don't try for failed requests
        if self.http_failed_disabled.get(host, 0) > 0:
            if self.http_failed_disabled[host] > (time.time() - 900):
                log.info2('Disabled calls to %s for 15 minutes because so many failed requests.', host)
                if not show_error:
                    # Bug fix: the message contained a %s placeholder that was
                    # never interpolated; fill in the host
                    raise Exception('Disabled calls to %s for 15 minutes because so many failed requests' % host)
                else:
                    return ''
            else:
                # Cool-down expired: forget the failure bookkeeping for this host
                del self.http_failed_request[host]
                del self.http_failed_disabled[host]

        # Rate-limit requests to this host
        self.wait(host)
        try:

            kwargs = {
                'headers': headers,
                'data': data if len(data) > 0 else None,
                'timeout': timeout,
                'files': files,
            }
            method = 'post' if len(data) > 0 or files else 'get'

            log.info('Opening url: %s %s, data: %s', (method, url, [x for x in data.keys()] if isinstance(data, dict) else 'with data'))
            response = r.request(method, url, verify = False, **kwargs)

            if response.status_code == requests.codes.ok:
                data = response.content
            else:
                response.raise_for_status()

            # Success: reset the consecutive-failure counter
            self.http_failed_request[host] = 0
        except (IOError, MaxRetryError, Timeout):
            if show_error:
                log.error('Failed opening url in %s: %s %s', (self.getName(), url, traceback.format_exc(0)))

            # Save failed requests by hosts
            try:
                if not self.http_failed_request.get(host):
                    self.http_failed_request[host] = 1
                else:
                    self.http_failed_request[host] += 1

                    # Disable temporarily
                    if self.http_failed_request[host] > 5 and not isLocalIP(host):
                        self.http_failed_disabled[host] = time.time()

            except:
                log.debug('Failed logging failed requests for %s: %s', (url, traceback.format_exc()))

            raise

        self.http_last_use[host] = time.time()

        return data
Example #42
0
    def scan(self,
             folder=None,
             files=None,
             simple=False,
             newer_than=0,
             on_found=None):

        folder = ss(os.path.normpath(folder))

        if not folder or not os.path.isdir(folder):
            log.error('Folder doesn\'t exists: %s', folder)
            return {}

        # Get movie "master" files
        movie_files = {}
        leftovers = []

        # Scan all files of the folder if no files are set
        if not files:
            check_file_date = True
            try:
                files = []
                for root, dirs, walk_files in os.walk(folder):
                    for filename in walk_files:
                        files.append(os.path.join(root, filename))
            except:
                log.error('Failed getting files from %s: %s',
                          (folder, traceback.format_exc()))
        else:
            check_file_date = False
            files = [ss(x) for x in files]

        db = get_session()

        for file_path in files:

            if not os.path.exists(file_path):
                continue

            # Remove ignored files
            if self.isSampleFile(file_path):
                leftovers.append(file_path)
                continue
            elif not self.keepFile(file_path):
                continue

            is_dvd_file = self.isDVDFile(file_path)
            if os.path.getsize(file_path) > self.minimal_filesize[
                    'media'] or is_dvd_file:  # Minimal 300MB files or is DVD file

                # Normal identifier
                identifier = self.createStringIdentifier(
                    file_path, folder, exclude_filename=is_dvd_file)
                identifiers = [identifier]

                # Identifier with quality
                quality = fireEvent('quality.guess', [file_path],
                                    single=True) if not is_dvd_file else {
                                        'identifier': 'dvdr'
                                    }
                if quality:
                    identifier_with_quality = '%s %s' % (
                        identifier, quality.get('identifier', ''))
                    identifiers = [identifier_with_quality, identifier]

                if not movie_files.get(identifier):
                    movie_files[identifier] = {
                        'unsorted_files': [],
                        'identifiers': identifiers,
                        'is_dvd': is_dvd_file,
                    }

                movie_files[identifier]['unsorted_files'].append(file_path)
            else:
                leftovers.append(file_path)

            # Break if CP wants to shut down
            if self.shuttingDown():
                break

        # Cleanup
        del files

        # Sort reverse, this prevents "Iron man 2" from getting grouped with "Iron man" as the "Iron Man 2"
        # files will be grouped first.
        leftovers = set(sorted(leftovers, reverse=True))

        # Group files minus extension
        for identifier, group in movie_files.iteritems():
            if identifier not in group['identifiers'] and len(identifier) > 0:
                group['identifiers'].append(identifier)

            log.debug('Grouping files: %s', identifier)

            for file_path in group['unsorted_files']:
                wo_ext = file_path[:-(len(getExt(file_path)) + 1)]
                found_files = set([i for i in leftovers if wo_ext in i])
                group['unsorted_files'].extend(found_files)
                leftovers = leftovers - found_files

            # Break if CP wants to shut down
            if self.shuttingDown():
                break

        # Create identifiers for all leftover files
        path_identifiers = {}
        for file_path in leftovers:
            identifier = self.createStringIdentifier(file_path, folder)

            if not path_identifiers.get(identifier):
                path_identifiers[identifier] = []

            path_identifiers[identifier].append(file_path)

        # Group the files based on the identifier
        delete_identifiers = []
        for identifier, found_files in path_identifiers.iteritems():
            log.debug('Grouping files on identifier: %s', identifier)

            group = movie_files.get(identifier)
            if group:
                group['unsorted_files'].extend(found_files)
                delete_identifiers.append(identifier)

                # Remove the found files from the leftover stack
                leftovers = leftovers - set(found_files)

            # Break if CP wants to shut down
            if self.shuttingDown():
                break

        # Cleaning up used
        for identifier in delete_identifiers:
            if path_identifiers.get(identifier):
                del path_identifiers[identifier]
        del delete_identifiers

        # Group based on folder
        delete_identifiers = []
        for identifier, found_files in path_identifiers.iteritems():
            log.debug('Grouping files on foldername: %s', identifier)

            for ff in found_files:
                new_identifier = self.createStringIdentifier(
                    os.path.dirname(ff), folder)

                group = movie_files.get(new_identifier)
                if group:
                    group['unsorted_files'].extend([ff])
                    delete_identifiers.append(identifier)

                    # Remove the found files from the leftover stack
                    leftovers = leftovers - set([ff])

            # Break if CP wants to shut down
            if self.shuttingDown():
                break

        # Cleaning up used
        for identifier in delete_identifiers:
            if path_identifiers.get(identifier):
                del path_identifiers[identifier]
        del delete_identifiers

        # Make sure we remove older / still extracting files
        valid_files = {}
        while True and not self.shuttingDown():
            try:
                identifier, group = movie_files.popitem()
            except:
                break

            # Check if movie is fresh and maybe still unpacking, ignore files new then 1 minute
            file_too_new = False
            for cur_file in group['unsorted_files']:
                if not os.path.isfile(cur_file):
                    file_too_new = time.time()
                    break
                file_time = [
                    os.path.getmtime(cur_file),
                    os.path.getctime(cur_file)
                ]
                for t in file_time:
                    if t > time.time() - 60:
                        file_too_new = tryInt(time.time() - t)
                        break

                if file_too_new:
                    break

            if check_file_date and file_too_new:
                try:
                    time_string = time.ctime(file_time[0])
                except:
                    try:
                        time_string = time.ctime(file_time[1])
                    except:
                        time_string = 'unknown'

                log.info(
                    'Files seem to be still unpacking or just unpacked (created on %s), ignoring for now: %s',
                    (time_string, identifier))

                # Delete the unsorted list
                del group['unsorted_files']

                continue

            # Only process movies newer than x
            if newer_than and newer_than > 0:
                has_new_files = False
                for cur_file in group['unsorted_files']:
                    file_time = [
                        os.path.getmtime(cur_file),
                        os.path.getctime(cur_file)
                    ]
                    if file_time[0] > newer_than or file_time[1] > newer_than:
                        has_new_files = True
                        break

                if not has_new_files:
                    log.debug(
                        'None of the files have changed since %s for %s, skipping.',
                        (time.ctime(newer_than), identifier))

                    # Delete the unsorted list
                    del group['unsorted_files']

                    continue

            valid_files[identifier] = group

        del movie_files

        # Determine file types
        processed_movies = {}
        total_found = len(valid_files)
        while True and not self.shuttingDown():
            try:
                identifier, group = valid_files.popitem()
            except:
                break

            # Group extra (and easy) files first
            # images = self.getImages(group['unsorted_files'])
            group['files'] = {
                'movie_extra': self.getMovieExtras(group['unsorted_files']),
                'subtitle': self.getSubtitles(group['unsorted_files']),
                'subtitle_extra':
                self.getSubtitlesExtras(group['unsorted_files']),
                'nfo': self.getNfo(group['unsorted_files']),
                'trailer': self.getTrailers(group['unsorted_files']),
                #'backdrop': images['backdrop'],
                'leftover': set(group['unsorted_files']),
            }

            # Media files
            if group['is_dvd']:
                group['files']['movie'] = self.getDVDFiles(
                    group['unsorted_files'])
            else:
                group['files']['movie'] = self.getMediaFiles(
                    group['unsorted_files'])

            if len(group['files']['movie']) == 0:
                log.error('Couldn\'t find any movie files for %s', identifier)
                continue

            log.debug('Getting metadata for %s', identifier)
            group['meta_data'] = self.getMetaData(group, folder=folder)

            # Subtitle meta
            group['subtitle_language'] = self.getSubtitleLanguage(
                group) if not simple else {}

            # Get parent dir from movie files
            for movie_file in group['files']['movie']:
                group['parentdir'] = os.path.dirname(movie_file)
                group['dirname'] = None

                folder_names = group['parentdir'].replace(folder, '').split(
                    os.path.sep)
                folder_names.reverse()

                # Try and get a proper dirname, so no "A", "Movie", "Download" etc
                for folder_name in folder_names:
                    if folder_name.lower(
                    ) not in self.ignore_names and len(folder_name) > 2:
                        group['dirname'] = folder_name
                        break

                break

            # Leftover "sorted" files
            for file_type in group['files']:
                if not file_type is 'leftover':
                    group['files']['leftover'] -= set(
                        group['files'][file_type])

            # Delete the unsorted list
            del group['unsorted_files']

            # Determine movie
            group['library'] = self.determineMovie(group)
            if not group['library']:
                log.error('Unable to determine movie: %s',
                          group['identifiers'])
            else:
                movie = db.query(Movie).filter_by(
                    library_id=group['library']['id']).first()
                group['movie_id'] = None if not movie else movie.id

            processed_movies[identifier] = group

            # Notify parent & progress on something found
            if on_found:
                on_found(group, total_found,
                         total_found - len(processed_movies))

        if len(processed_movies) > 0:
            log.info('Found %s movies in the folder %s',
                     (len(processed_movies), folder))
        else:
            log.debug('Found no movies in the folder %s', (folder))

        return processed_movies
Example #43
0
class Renamer(Plugin):

    renaming_started = False
    checking_snatched = False

    def __init__(self):
        """Register the renamer's API endpoint, event handlers and the
        hooks that reschedule its cron jobs when settings change."""

        # Documentation for the manual-scan API endpoint
        scan_params = {
            'async': {
                'desc':
                'Optional: Set to 1 if you dont want to fire the renamer.scan asynchronous.'
            },
            'movie_folder': {
                'desc':
                'Optional: The folder of the movie to scan. Keep empty for default renamer folder.'
            },
            'downloader': {
                'desc':
                'Optional: The downloader this movie has been downloaded with'
            },
            'download_id': {
                'desc': 'Optional: The downloader\'s nzb/torrent ID'
            },
        }
        addApiView('renamer.scan', self.scanView, docs = {
            'desc':
            'For the renamer to check for new files to rename in a folder',
            'params': scan_params,
        })

        addEvent('renamer.scan', self.scan)
        addEvent('renamer.check_snatched', self.checkSnatched)

        # Run a scan, a snatched-check and the cron setup on startup
        for startup_handler in (self.scan, self.checkSnatched, self.setCrons):
            addEvent('app.load', startup_handler)

        # Enable / disable interval
        addEvent('setting.save.renamer.enabled.after', self.setCrons)
        addEvent('setting.save.renamer.run_every.after', self.setCrons)
        addEvent('setting.save.renamer.force_every.after', self.setCrons)

    def setCrons(self):
        """(Re)install the scheduled renamer jobs.

        Removes any existing 'check_snatched' schedules first, then adds
        them back only when the renamer is enabled and the corresponding
        interval setting is positive. Always returns True.
        """

        enabled = self.isEnabled()

        # Regular snatched-release check, every `run_every` minutes
        fireEvent('schedule.remove', 'renamer.check_snatched')
        run_every = self.conf('run_every')
        if enabled and run_every > 0:
            fireEvent('schedule.interval', 'renamer.check_snatched',
                      self.checkSnatched, minutes = run_every, single = True)

        # Forced full scan, every `force_every` hours
        fireEvent('schedule.remove', 'renamer.check_snatched_forced')
        force_every = self.conf('force_every')
        if enabled and force_every > 0:
            fireEvent('schedule.interval', 'renamer.check_snatched_forced',
                      self.scan, hours = force_every, single = True)

        return True

    def scanView(self, **kwargs):
        """API handler for 'renamer.scan': fire a renamer scan.

        Accepted kwargs (all optional): 'async' (fire the event
        asynchronously when truthy), 'movie_folder', 'downloader' and
        'download_id'. Always returns {'success': True}.
        """

        # BUG FIX: 'async' became a reserved keyword in Python 3.7, so it
        # can no longer be used as a local variable name. The public API
        # parameter keeps its 'async' name; only the local is renamed.
        run_async = tryInt(kwargs.get('async', None))
        movie_folder = kwargs.get('movie_folder', None)
        downloader = kwargs.get('downloader', None)
        download_id = kwargs.get('download_id', None)

        # NOTE(review): the API doc text suggests async=1 disables async
        # firing, but the code fires asynchronously when truthy — behavior
        # kept as-is; confirm which is intended.
        fire_handle = fireEvent if not run_async else fireEventAsync

        fire_handle('renamer.scan',
                    movie_folder = movie_folder,
                    download_info = {
                        'id': download_id,
                        'downloader': downloader
                    } if download_id else None)

        return {'success': True}

    def scan(self, movie_folder=None, download_info=None):

        if self.isDisabled():
            return

        if self.renaming_started is True:
            log.info(
                'Renamer is already running, if you see this often, check the logs above for errors.'
            )
            return

        # Check to see if the "to" folder is inside the "from" folder.
        if movie_folder and not os.path.isdir(
                movie_folder) or not os.path.isdir(
                    self.conf('from')) or not os.path.isdir(self.conf('to')):
            l = log.debug if movie_folder else log.error
            l('Both the "To" and "From" have to exist.')
            return
        elif self.conf('from') in self.conf('to'):
            log.error(
                'The "to" can\'t be inside of the "from" folder. You\'ll get an infinite loop.'
            )
            return
        elif (movie_folder
              and movie_folder in [self.conf('to'),
                                   self.conf('from')]):
            log.error(
                'The "to" and "from" folders can\'t be inside of or the same as the provided movie folder.'
            )
            return

        self.renaming_started = True

        # make sure the movie folder name is included in the search
        folder = None
        files = []
        if movie_folder:
            log.info('Scanning movie folder %s...', movie_folder)
            movie_folder = movie_folder.rstrip(os.path.sep)
            folder = os.path.dirname(movie_folder)

            # Get all files from the specified folder
            try:
                for root, folders, names in os.walk(movie_folder):
                    files.extend([os.path.join(root, name) for name in names])
            except:
                log.error('Failed getting files from %s: %s',
                          (movie_folder, traceback.format_exc()))

        db = get_session()

        # Extend the download info with info stored in the downloaded release
        download_info = self.extendDownloadInfo(download_info)

        groups = fireEvent('scanner.scan',
                           folder=folder if folder else self.conf('from'),
                           files=files,
                           download_info=download_info,
                           return_ignored=False,
                           single=True)

        destination = self.conf('to')
        folder_name = self.conf('folder_name')
        file_name = self.conf('file_name')
        trailer_name = self.conf('trailer_name')
        nfo_name = self.conf('nfo_name')
        separator = self.conf('separator')

        # Statusses
        done_status, active_status, downloaded_status, snatched_status = \
            fireEvent('status.get', ['done', 'active', 'downloaded', 'snatched'], single = True)

        for group_identifier in groups:

            group = groups[group_identifier]
            rename_files = {}
            remove_files = []
            remove_releases = []

            movie_title = getTitle(group['library'])

            # Add _UNKNOWN_ if no library item is connected
            if not group['library'] or not movie_title:
                self.tagDir(group, 'unknown')
                continue
            # Rename the files using the library data
            else:
                group['library'] = fireEvent(
                    'library.update',
                    identifier=group['library']['identifier'],
                    single=True)
                if not group['library']:
                    log.error(
                        'Could not rename, no library item to work with: %s',
                        group_identifier)
                    continue

                library = group['library']
                movie_title = getTitle(library)

                # Find subtitle for renaming
                fireEvent('renamer.before', group)

                # Remove weird chars from moviename
                movie_name = re.sub(r"[\x00\/\\:\*\?\"<>\|]", '', movie_title)

                # Put 'The' at the end
                name_the = movie_name
                if movie_name[:4].lower() == 'the ':
                    name_the = movie_name[4:] + ', The'

                replacements = {
                    'ext':
                    'mkv',
                    'namethe':
                    name_the.strip(),
                    'thename':
                    movie_name.strip(),
                    'year':
                    library['year'],
                    'first':
                    name_the[0].upper(),
                    'quality':
                    group['meta_data']['quality']['label'],
                    'quality_type':
                    group['meta_data']['quality_type'],
                    'video':
                    group['meta_data'].get('video'),
                    'audio':
                    group['meta_data'].get('audio'),
                    'group':
                    group['meta_data']['group'],
                    'source':
                    group['meta_data']['source'],
                    'resolution_width':
                    group['meta_data'].get('resolution_width'),
                    'resolution_height':
                    group['meta_data'].get('resolution_height'),
                    'audio_channels':
                    group['meta_data'].get('audio_channels'),
                    'imdb_id':
                    library['identifier'],
                    'cd':
                    '',
                    'cd_nr':
                    '',
                }

                for file_type in group['files']:

                    # Move nfo depending on settings
                    if file_type is 'nfo' and not self.conf('rename_nfo'):
                        log.debug('Skipping, renaming of %s disabled',
                                  file_type)
                        if self.conf('cleanup'):
                            for current_file in group['files'][file_type]:
                                remove_files.append(current_file)
                        continue

                    # Subtitle extra
                    if file_type is 'subtitle_extra':
                        continue

                    # Move other files
                    multiple = len(
                        group['files'][file_type]) > 1 and not group['is_dvd']
                    cd = 1 if multiple else 0

                    for current_file in sorted(list(
                            group['files'][file_type])):
                        current_file = toUnicode(current_file)

                        # Original filename
                        replacements['original'] = os.path.splitext(
                            os.path.basename(current_file))[0]
                        replacements['original_folder'] = fireEvent(
                            'scanner.remove_cptag',
                            group['dirname'],
                            single=True)

                        # Extension
                        replacements['ext'] = getExt(current_file)

                        # cd #
                        replacements['cd'] = ' cd%d' % cd if multiple else ''
                        replacements['cd_nr'] = cd if multiple else ''

                        # Naming
                        final_folder_name = self.doReplace(
                            folder_name, replacements)
                        final_file_name = self.doReplace(
                            file_name, replacements)
                        replacements['filename'] = final_file_name[:-(
                            len(getExt(final_file_name)) + 1)]

                        # Meta naming
                        if file_type is 'trailer':
                            final_file_name = self.doReplace(
                                trailer_name,
                                replacements,
                                remove_multiple=True)
                        elif file_type is 'nfo':
                            final_file_name = self.doReplace(
                                nfo_name, replacements, remove_multiple=True)

                        # Seperator replace
                        if separator:
                            final_file_name = final_file_name.replace(
                                ' ', separator)

                        # Move DVD files (no structure renaming)
                        if group['is_dvd'] and file_type is 'movie':
                            found = False
                            for top_dir in [
                                    'video_ts', 'audio_ts', 'bdmv',
                                    'certificate'
                            ]:
                                has_string = current_file.lower().find(
                                    os.path.sep + top_dir + os.path.sep)
                                if has_string >= 0:
                                    structure_dir = current_file[
                                        has_string:].lstrip(os.path.sep)
                                    rename_files[current_file] = os.path.join(
                                        destination, final_folder_name,
                                        structure_dir)
                                    found = True
                                    break

                            if not found:
                                log.error(
                                    'Could not determine dvd structure for: %s',
                                    current_file)

                        # Do rename others
                        else:
                            if file_type is 'leftover':
                                if self.conf('move_leftover'):
                                    rename_files[current_file] = os.path.join(
                                        destination, final_folder_name,
                                        os.path.basename(current_file))
                            elif file_type not in ['subtitle']:
                                rename_files[current_file] = os.path.join(
                                    destination, final_folder_name,
                                    final_file_name)

                        # Check for extra subtitle files
                        if file_type is 'subtitle':

                            remove_multiple = False
                            if len(group['files']['movie']) == 1:
                                remove_multiple = True

                            sub_langs = group['subtitle_language'].get(
                                current_file, [])

                            # rename subtitles with or without language
                            sub_name = self.doReplace(
                                file_name,
                                replacements,
                                remove_multiple=remove_multiple)
                            rename_files[current_file] = os.path.join(
                                destination, final_folder_name, sub_name)

                            rename_extras = self.getRenameExtras(
                                extra_type='subtitle_extra',
                                replacements=replacements,
                                folder_name=folder_name,
                                file_name=file_name,
                                destination=destination,
                                group=group,
                                current_file=current_file,
                                remove_multiple=remove_multiple,
                            )

                            # Don't add language if multiple languages in 1 subtitle file
                            if len(sub_langs) == 1:
                                sub_name = sub_name.replace(
                                    replacements['ext'], '%s.%s' %
                                    (sub_langs[0], replacements['ext']))
                                rename_files[current_file] = os.path.join(
                                    destination, final_folder_name, sub_name)

                            rename_files = mergeDicts(rename_files,
                                                      rename_extras)

                        # Filename without cd etc
                        elif file_type is 'movie':
                            rename_extras = self.getRenameExtras(
                                extra_type='movie_extra',
                                replacements=replacements,
                                folder_name=folder_name,
                                file_name=file_name,
                                destination=destination,
                                group=group,
                                current_file=current_file)
                            rename_files = mergeDicts(rename_files,
                                                      rename_extras)

                            group['filename'] = self.doReplace(
                                file_name, replacements, remove_multiple=True
                            )[:-(len(getExt(final_file_name)) + 1)]
                            group['destination_dir'] = os.path.join(
                                destination, final_folder_name)

                        if multiple:
                            cd += 1

                # Before renaming, remove the lower quality files
                library = db.query(Library).filter_by(
                    identifier=group['library']['identifier']).first()
                remove_leftovers = True

                # Add it to the wanted list before we continue
                if len(library.movies) == 0:
                    profile = db.query(Profile).filter_by(
                        core=True,
                        label=group['meta_data']['quality']['label']).first()
                    fireEvent('movie.add',
                              params={
                                  'identifier': group['library']['identifier'],
                                  'profile_id': profile.id
                              },
                              search_after=False)
                    db.expire_all()
                    library = db.query(Library).filter_by(
                        identifier=group['library']['identifier']).first()

                for movie in library.movies:

                    # Mark movie "done" onces it found the quality with the finish check
                    try:
                        if movie.status_id == active_status.get(
                                'id') and movie.profile:
                            for profile_type in movie.profile.types:
                                if profile_type.quality_id == group[
                                        'meta_data']['quality'][
                                            'id'] and profile_type.finish:
                                    movie.status_id = done_status.get('id')
                                    movie.last_edit = int(time.time())
                                    db.commit()
                    except Exception, e:
                        log.error('Failed marking movie finished: %s %s',
                                  (e, traceback.format_exc()))

                    # Go over current movie releases
                    for release in movie.releases:

                        # When a release already exists
                        if release.status_id is done_status.get('id'):

                            # This is where CP removes older, lesser quality releases
                            if release.quality.order > group['meta_data'][
                                    'quality']['order']:
                                log.info('Removing lesser quality %s for %s.',
                                         (movie.library.titles[0].title,
                                          release.quality.label))
                                for current_file in release.files:
                                    remove_files.append(current_file)
                                remove_releases.append(release)
                            # Same quality, but still downloaded, so maybe repack/proper/unrated/directors cut etc
                            elif release.quality.order is group['meta_data'][
                                    'quality']['order']:
                                log.info(
                                    'Same quality release already exists for %s, with quality %s. Assuming repack.',
                                    (movie.library.titles[0].title,
                                     release.quality.label))
                                for current_file in release.files:
                                    remove_files.append(current_file)
                                remove_releases.append(release)

                            # Downloaded a lower quality, rename the newly downloaded files/folder to exclude them from scan
                            else:
                                log.info(
                                    'Better quality release already exists for %s, with quality %s',
                                    (movie.library.titles[0].title,
                                     release.quality.label))

                                # Add exists tag to the .ignore file
                                self.tagDir(group, 'exists')

                                # Notify on rename fail
                                download_message = 'Renaming of %s (%s) canceled, exists in %s already.' % (
                                    movie.library.titles[0].title,
                                    group['meta_data']['quality']['label'],
                                    release.quality.label)
                                fireEvent('movie.renaming.canceled',
                                          message=download_message,
                                          data=group)
                                remove_leftovers = False

                                break
                        elif release.status_id is snatched_status.get('id'):
                            if release.quality.id is group['meta_data'][
                                    'quality']['id']:
                                log.debug('Marking release as downloaded')
                                try:
                                    release.status_id = downloaded_status.get(
                                        'id')
                                    release.last_edit = int(time.time())
                                except Exception, e:
                                    log.error(
                                        'Failed marking release as finished: %s %s',
                                        (e, traceback.format_exc()))

                                db.commit()

                # Remove leftover files
                if self.conf('cleanup') and not self.conf('move_leftover') and remove_leftovers and \
                        not (self.conf('file_action') != 'move' and self.downloadIsTorrent(download_info)):
                    log.debug('Removing leftover files')
                    for current_file in group['files']['leftover']:
                        remove_files.append(current_file)
                elif not remove_leftovers:  # Don't remove anything
                    break

            # Remove files
            delete_folders = []
            for src in remove_files:

                if isinstance(src, File):
                    src = src.path

                if rename_files.get(src):
                    log.debug('Not removing file that will be renamed: %s',
                              src)
                    continue

                log.info('Removing "%s"', src)
                try:
                    src = ss(src)
                    if os.path.isfile(src):
                        os.remove(src)

                        parent_dir = os.path.normpath(os.path.dirname(src))
                        if delete_folders.count(
                                parent_dir) == 0 and os.path.isdir(
                                    parent_dir) and not parent_dir in [
                                        destination, movie_folder
                                    ] and not self.conf('from') in parent_dir:
                            delete_folders.append(parent_dir)

                except:
                    log.error('Failed removing %s: %s',
                              (src, traceback.format_exc()))
                    self.tagDir(group, 'failed_remove')

            # Delete leftover folder from older releases
            for delete_folder in delete_folders:
                try:
                    self.deleteEmptyFolder(delete_folder, show_error=False)
                except Exception, e:
                    log.error('Failed to delete folder: %s %s',
                              (e, traceback.format_exc()))
Example #44
0
    def getAllDownloadStatus(self):
        """Collect the status of every torrent known to Deluge.

        Returns a StatusList of per-torrent dicts (id, name, status,
        original_status, seed_ratio, timeleft, folder). Returns None when
        the renamer 'from' folder is missing, and False when the Deluge
        connection fails or the queue is empty/unreadable.
        """

        log.debug('Checking Deluge download status.')

        # Without the renamer "from" folder there is nowhere to pick the
        # finished download up from, so don't bother querying Deluge.
        if not os.path.isdir(Env.setting('from', 'renamer')):
            # BUG FIX: message previously read "doesn't to exist"
            log.error('Renamer "from" folder doesn\'t exist.')
            return

        if not self.connect():
            return False

        statuses = StatusList(self)

        queue = self.drpc.get_alltorrents()
        if not queue:
            log.debug('Nothing in queue or error')
            return False

        for torrent_id in queue:
            item = queue[torrent_id]
            log.debug(
                'name=%s / id=%s / save_path=%s / move_completed_path=%s / hash=%s / progress=%s / state=%s / eta=%s / ratio=%s / stop_ratio=%s / is_seed=%s / is_finished=%s / paused=%s',
                (item['name'], item['hash'], item['save_path'],
                 item['move_completed_path'], item['hash'], item['progress'],
                 item['state'], item['eta'], item['ratio'], item['stop_ratio'],
                 item['is_seed'], item['is_finished'], item['paused']))

            # Deluge has no easy way to work out if a torrent is stalled or failing.
            #status = 'failed'
            status = 'busy'
            if item['is_seed'] and tryFloat(item['ratio']) < tryFloat(
                    item['stop_ratio']):
                # We have item['seeding_time'] to work out what the seeding time is, but we do not
                # have access to the downloader seed_time, as with deluge we have no way to pass it
                # when the torrent is added. So Deluge will only look at the ratio.
                # See above comment in download().
                status = 'seeding'
            elif item['is_seed'] and item['is_finished'] and item[
                    'paused'] and item['state'] == 'Paused':
                status = 'completed'

            # Deluge may move the finished download to a different folder
            download_dir = item['save_path']
            if item['move_on_completed']:
                download_dir = item['move_completed_path']

            statuses.append({
                'id': item['hash'],
                'name': item['name'],
                'status': status,
                'original_status': item['state'],
                'seed_ratio': item['ratio'],
                'timeleft': str(timedelta(seconds = item['eta'])),
                'folder': ss(os.path.join(download_dir, item['name'])),
            })

        return statuses
Example #45
0
    def updateLibrary(self, full=True):
        """Scan the managed directories and sync found movies into the library.

        full: when True, rescan everything and (if the 'cleanup' setting is
        enabled) remove movies/releases whose files no longer exist on disk;
        when False, only scan files newer than the previous run.
        Per-directory progress is tracked in self.in_progress and broadcast
        to the frontend via 'notify.frontend' events.
        """
        last_update = float(Env.prop('manage.last_update', default=0))

        # Don't start if a scan is already running, the plugin is disabled,
        # or the previous scan finished less than 20 seconds ago (debounce).
        if self.in_progress:
            log.info('Already updating library: %s', self.in_progress)
            return
        elif self.isDisabled() or (last_update > time.time() - 20):
            return

        self.in_progress = {}
        fireEvent('notify.frontend', type='manage.updating', data=True)

        try:

            directories = self.directories()
            added_identifiers = []

            # Add some progress
            # (totals are filled in later by the scanner's on_found callback)
            self.in_progress = {}
            for directory in directories:
                self.in_progress[os.path.normpath(directory)] = {
                    'total': None,
                    'to_go': None,
                }

            for directory in directories:
                folder = os.path.normpath(directory)

                if not os.path.isdir(folder):
                    if len(directory) > 0:
                        log.error('Directory doesn\'t exist: %s', folder)
                    continue

                log.info('Updating manage library: %s', folder)
                fireEvent('notify.frontend',
                          type='manage.update',
                          data=True,
                          message='Scanning for movies in "%s"' % folder)

                # createAddToLibrary collects every identifier it adds into
                # added_identifiers so the cleanup pass below can diff them.
                onFound = self.createAddToLibrary(folder, added_identifiers)
                fireEvent('scanner.scan',
                          folder=folder,
                          simple=True,
                          newer_than=last_update if not full else 0,
                          on_found=onFound,
                          single=True)

                # Break if CP wants to shut down
                if self.shuttingDown():
                    break

            # If cleanup option is enabled, remove offline files from database
            if self.conf('cleanup') and full and not self.shuttingDown():

                # Get movies with done status
                total_movies, done_movies = fireEvent('movie.list',
                                                      status='done',
                                                      single=True)

                for done_movie in done_movies:
                    # Movie no longer present in any scanned folder: drop it.
                    if done_movie['library'][
                            'identifier'] not in added_identifiers:
                        fireEvent('movie.delete',
                                  movie_id=done_movie['id'],
                                  delete_from='all')
                    else:

                        for release in done_movie.get('releases', []):
                            if len(release.get('files', [])) == 0:
                                fireEvent('release.delete', release['id'])
                            else:
                                for release_file in release.get('files', []):
                                    # Remove release not available anymore
                                    if not os.path.isfile(
                                            ss(release_file['path'])):
                                        fireEvent('release.clean',
                                                  release['id'])
                                        break

                        # Check if there are duplicate releases (different quality) use the last one, delete the rest
                        if len(done_movie.get('releases', [])) > 1:
                            used_files = {}
                            for release in done_movie.get('releases', []):

                                for release_file in release.get('files', []):
                                    already_used = used_files.get(
                                        release_file['path'])

                                    if already_used:
                                        # Keep whichever release has the
                                        # higher id (the most recent one).
                                        if already_used < release['id']:
                                            fireEvent(
                                                'release.delete',
                                                release['id'],
                                                single=True)  # delete this one
                                        else:
                                            fireEvent('release.delete',
                                                      already_used,
                                                      single=True
                                                      )  # delete previous one
                                        break
                                    else:
                                        used_files[release_file[
                                            'path']] = release.get('id')
                            del used_files

            Env.prop('manage.last_update', time.time())
        except:
            log.error('Failed updating library: %s', (traceback.format_exc()))

        # Wait until every directory's async scan reports itself finished
        # (to_go counters are decremented by the on_found callbacks).
        while True and not self.shuttingDown():

            delete_me = {}

            for folder in self.in_progress:
                if self.in_progress[folder]['to_go'] <= 0:
                    delete_me[folder] = True

            for delete in delete_me:
                del self.in_progress[delete]

            if len(self.in_progress) == 0:
                break

            time.sleep(1)

        fireEvent('notify.frontend', type='manage.updating', data=False)
        self.in_progress = False
Example #46
0
 def add_torrent_file(self, filename, filedata, add_folder=False):
     """Upload a .torrent file to uTorrent via the add-file action.

     When *add_folder* is set, ask the client to download into a
     sub-folder named after *filename*.
     """
     quoted_name = urllib.quote(filename) if add_folder else None
     action = "action=add-file"
     if quoted_name is not None:
         action = action + "&path=%s" % quoted_name
     payload = {"torrent_file": (ss(filename), filedata)}
     return self._request(action, payload)
Example #47
0
 def add_torrent_file(self, filename, filedata, add_folder=False):
     """Upload a .torrent file to uTorrent via the add-file action.

     When *add_folder* is set, ask the client to download into a
     sub-folder named after *filename*.
     """
     quoted_name = urllib.parse.quote(filename) if add_folder else None
     action = 'action=add-file'
     if quoted_name is not None:
         action = action + '&path=%s' % quoted_name
     payload = {'torrent_file': (ss(filename), filedata)}
     return self._request(action, payload)
Example #48
0
    def getAllDownloadStatus(self):
        """Return a ReleaseDownloadList describing every torrent in uTorrent.

        Returns False when the client is unreachable, returns no data, or
        reports an error.  Torrent tuples are indexed per the uTorrent web
        API list format (0 = hash, 1 = status bitmask, 2 = name,
        4 = progress in per-mille, 7 = ratio * 1000, 10 = eta seconds,
        26 = save path).
        """

        log.debug('Checking uTorrent download status.')

        if not self.connect():
            return False

        release_downloads = ReleaseDownloadList(self)

        data = self.utorrent_api.get_status()
        if not data:
            log.error('Error getting data from uTorrent')
            return False

        queue = json.loads(data)
        if queue.get('error'):
            log.error('Error getting data from uTorrent: %s',
                      queue.get('error'))
            return False

        if not queue.get('torrents'):
            log.debug('Nothing in queue')
            return False

        # uTorrent status bit flags for the torrent[1] bitmask.
        # Hoisted out of the loop: the mapping is constant per call.
        status_flags = {
            "STARTED": 1,
            "CHECKING": 2,
            "CHECK-START": 4,
            "CHECKED": 8,
            "ERROR": 16,
            "PAUSED": 32,
            "QUEUED": 64,
            "LOADED": 128
        }

        # Get torrents
        for torrent in queue['torrents']:

            # Get files of the torrent (best effort; keep an empty list on
            # failure so the entry is still reported)
            torrent_files = []
            try:
                torrent_files = json.loads(
                    self.utorrent_api.get_files(torrent[0]))
                torrent_files = [
                    os.path.join(torrent[26], torrent_file[0])
                    for torrent_file in torrent_files['files'][1]
                ]
            except:
                log.debug('Failed getting files from torrent: %s', torrent[2])

            # Map the status bitmask + progress to CouchPotato's states.
            status = 'busy'
            if (torrent[1] & status_flags["STARTED"] or torrent[1]
                    & status_flags["QUEUED"]) and torrent[4] == 1000:
                status = 'seeding'
            elif (torrent[1] & status_flags["ERROR"]):
                status = 'failed'
            elif torrent[4] == 1000:
                status = 'completed'

            # Once a torrent is no longer busy, clear read-only flags so
            # the renamer can move/delete the files later.
            if status != 'busy':
                self.removeReadOnly(torrent_files)

            release_downloads.append({
                'id': torrent[0],
                'name': torrent[2],
                'status': status,
                'seed_ratio': float(torrent[7]) / 1000,
                'original_status': torrent[1],
                'timeleft': str(timedelta(seconds=torrent[10])),
                'folder': ss(torrent[26]),
                'files': ss('|'.join(torrent_files))
            })

        return release_downloads
Example #49
0
    def scan(self,
             folder=None,
             files=None,
             release_download=None,
             simple=False,
             newer_than=0,
             return_ignored=True,
             on_found=None):
        """Scan a folder (or explicit file list) and group files into movies.

        folder: root directory to scan.
        files: optional pre-built file list; skips os.walk and the
            "files too new" check.
        release_download: download info used to pin identification to a
            single movie; ignored when zero or multiple groups are found.
        simple: skip expensive extras such as subtitle language detection.
        newer_than: only process groups containing files changed after this
            timestamp (epoch seconds); 0 disables the filter.
        return_ignored: when False, drop groups that contain .ignore files.
        on_found: callback fired for each processed group with
            (group, total_found, remaining).

        Returns a dict mapping identifier -> group data.
        """

        folder = sp(folder)

        if not folder or not os.path.isdir(folder):
            log.error('Folder doesn\'t exist: %s', folder)
            return {}

        # Get movie "master" files
        movie_files = {}
        leftovers = []

        # Scan all files of the folder if no files are set
        if not files:
            check_file_date = True
            try:
                files = []
                for root, dirs, walk_files in os.walk(folder,
                                                      followlinks=True):
                    files.extend([
                        sp(os.path.join(sp(root), ss(filename)))
                        for filename in walk_files
                    ])

                    # Break if CP wants to shut down
                    if self.shuttingDown():
                        break

            except:
                log.error('Failed getting files from %s: %s',
                          (folder, traceback.format_exc()))

            log.debug('Found %s files to scan and group in %s',
                      (len(files), folder))
        else:
            check_file_date = False
            files = [sp(x) for x in files]

        for file_path in files:

            if not os.path.exists(file_path):
                continue

            # Remove ignored files
            if self.isSampleFile(file_path):
                leftovers.append(file_path)
                continue
            elif not self.keepFile(file_path):
                continue

            is_dvd_file = self.isDVDFile(file_path)
            if self.filesizeBetween(
                    file_path, self.file_sizes['movie']
            ) or is_dvd_file:  # Minimal 300MB files or is DVD file

                # Normal identifier
                identifier = self.createStringIdentifier(
                    file_path, folder, exclude_filename=is_dvd_file)
                identifiers = [identifier]

                # Identifier with quality
                quality = fireEvent('quality.guess',
                                    files=[file_path],
                                    size=self.getFileSize(file_path),
                                    single=True) if not is_dvd_file else {
                                        'identifier': 'dvdr'
                                    }
                if quality:
                    identifier_with_quality = '%s %s' % (
                        identifier, quality.get('identifier', ''))
                    identifiers = [identifier_with_quality, identifier]

                if not movie_files.get(identifier):
                    movie_files[identifier] = {
                        'unsorted_files': [],
                        'identifiers': identifiers,
                        'is_dvd': is_dvd_file,
                    }

                movie_files[identifier]['unsorted_files'].append(file_path)
            else:
                leftovers.append(file_path)

            # Break if CP wants to shut down
            if self.shuttingDown():
                break

        # Cleanup
        del files

        # Sort reverse, this prevents "Iron man 2" from getting grouped with "Iron man" as the "Iron Man 2"
        # files will be grouped first.
        leftovers = set(sorted(leftovers, reverse=True))

        # Group files minus extension
        ignored_identifiers = []
        for identifier, group in movie_files.items():
            if identifier not in group['identifiers'] and len(identifier) > 0:
                group['identifiers'].append(identifier)

            log.debug('Grouping files: %s', identifier)

            has_ignored = 0
            for file_path in list(group['unsorted_files']):
                ext = getExt(file_path)
                wo_ext = file_path[:-(len(ext) + 1)]
                found_files = set([i for i in leftovers if wo_ext in i])
                group['unsorted_files'].extend(found_files)
                leftovers = leftovers - found_files

                has_ignored += 1 if ext == 'ignore' else 0

            if has_ignored == 0:
                for file_path in list(group['unsorted_files']):
                    ext = getExt(file_path)
                    has_ignored += 1 if ext == 'ignore' else 0

            if has_ignored > 0:
                ignored_identifiers.append(identifier)

            # Break if CP wants to shut down
            if self.shuttingDown():
                break

        # Create identifiers for all leftover files
        path_identifiers = {}
        for file_path in leftovers:
            identifier = self.createStringIdentifier(file_path, folder)

            if not path_identifiers.get(identifier):
                path_identifiers[identifier] = []

            path_identifiers[identifier].append(file_path)

        # Group the files based on the identifier
        delete_identifiers = []
        for identifier, found_files in path_identifiers.items():
            log.debug('Grouping files on identifier: %s', identifier)

            group = movie_files.get(identifier)
            if group:
                group['unsorted_files'].extend(found_files)
                delete_identifiers.append(identifier)

                # Remove the found files from the leftover stack
                leftovers = leftovers - set(found_files)

            # Break if CP wants to shut down
            if self.shuttingDown():
                break

        # Cleaning up used
        for identifier in delete_identifiers:
            if path_identifiers.get(identifier):
                del path_identifiers[identifier]
        del delete_identifiers

        # Group based on folder
        delete_identifiers = []
        for identifier, found_files in path_identifiers.items():
            log.debug('Grouping files on foldername: %s', identifier)

            for ff in found_files:
                new_identifier = self.createStringIdentifier(
                    os.path.dirname(ff), folder)

                group = movie_files.get(new_identifier)
                if group:
                    group['unsorted_files'].extend([ff])
                    delete_identifiers.append(identifier)

                    # Remove the found files from the leftover stack
                    leftovers -= leftovers - set([ff])

            # Break if CP wants to shut down
            if self.shuttingDown():
                break

        # leftovers should be empty
        if leftovers:
            log.debug('Some files are still left over: %s', leftovers)

        # Cleaning up used
        for identifier in delete_identifiers:
            if path_identifiers.get(identifier):
                del path_identifiers[identifier]
        del delete_identifiers

        # Make sure we remove older / still extracting files
        valid_files = {}
        while not self.shuttingDown():
            try:
                identifier, group = movie_files.popitem()
            except:
                break

            # Check if movie is fresh and maybe still unpacking, ignore files newer than 1 minute
            if check_file_date:
                files_too_new, time_string = self.checkFilesChanged(
                    group['unsorted_files'])
                if files_too_new:
                    log.info(
                        'Files seem to be still unpacking or just unpacked (created on %s), ignoring for now: %s',
                        (time_string, identifier))

                    # Delete the unsorted list
                    del group['unsorted_files']

                    continue

            # Only process movies newer than x
            if newer_than and newer_than > 0:
                has_new_files = False
                for cur_file in group['unsorted_files']:
                    file_time = self.getFileTimes(cur_file)
                    if file_time[0] > newer_than or file_time[1] > newer_than:
                        has_new_files = True
                        break

                if not has_new_files:
                    log.debug(
                        'None of the files have changed since %s for %s, skipping.',
                        (time.ctime(newer_than), identifier))

                    # Delete the unsorted list
                    del group['unsorted_files']

                    continue

            valid_files[identifier] = group

        del movie_files

        total_found = len(valid_files)

        # Make sure only one movie was found if a download ID is provided
        if release_download and total_found == 0:
            log.info(
                'Download ID provided (%s), but no groups found! Make sure the download contains valid media files (fully extracted).',
                release_download.get('imdb_id'))
        elif release_download and total_found > 1:
            log.info(
                'Download ID provided (%s), but more than one group found (%s). Ignoring Download ID...',
                (release_download.get('imdb_id'), len(valid_files)))
            release_download = None

        # Determine file types
        processed_movies = {}
        while not self.shuttingDown():
            try:
                identifier, group = valid_files.popitem()
            except:
                break

            if return_ignored is False and identifier in ignored_identifiers:
                log.debug('Ignore file found, ignoring release: %s',
                          identifier)
                continue

            # Group extra (and easy) files first
            group['files'] = {
                'movie_extra': self.getMovieExtras(group['unsorted_files']),
                'subtitle': self.getSubtitles(group['unsorted_files']),
                'subtitle_extra':
                self.getSubtitlesExtras(group['unsorted_files']),
                'nfo': self.getNfo(group['unsorted_files']),
                'trailer': self.getTrailers(group['unsorted_files']),
                'leftover': set(group['unsorted_files']),
            }

            # Media files
            if group['is_dvd']:
                group['files']['movie'] = self.getDVDFiles(
                    group['unsorted_files'])
            else:
                group['files']['movie'] = self.getMediaFiles(
                    group['unsorted_files'])

            if len(group['files']['movie']) == 0:
                log.error('Couldn\'t find any movie files for %s', identifier)
                continue

            log.debug('Getting metadata for %s', identifier)
            group['meta_data'] = self.getMetaData(
                group, folder=folder, release_download=release_download)

            # Subtitle meta
            group['subtitle_language'] = self.getSubtitleLanguage(
                group) if not simple else {}

            # Get parent dir from movie files
            for movie_file in group['files']['movie']:
                group['parentdir'] = os.path.dirname(movie_file)
                group['dirname'] = None

                folder_names = group['parentdir'].replace(folder, '').split(
                    os.path.sep)
                folder_names.reverse()

                # Try and get a proper dirname, so no "A", "Movie", "Download" etc
                for folder_name in folder_names:
                    if folder_name.lower(
                    ) not in self.ignore_names and len(folder_name) > 2:
                        group['dirname'] = folder_name
                        break

                break

            # Leftover "sorted" files
            # (use != instead of `is`: identity comparison on str literals
            # depends on interning and is not guaranteed)
            for file_type in group['files']:
                if file_type != 'leftover':
                    group['files']['leftover'] -= set(
                        group['files'][file_type])
                    group['files'][file_type] = list(group['files'][file_type])
            group['files']['leftover'] = list(group['files']['leftover'])

            # Delete the unsorted list
            del group['unsorted_files']

            # Determine movie
            group['media'] = self.determineMedia(
                group, release_download=release_download)
            if not group['media']:
                log.error('Unable to determine media: %s',
                          group['identifiers'])
            else:
                group['identifier'] = getIdentifier(
                    group['media']) or group['media']['info'].get('imdb')

            processed_movies[identifier] = group

            # Notify parent & progress on something found
            if on_found:
                on_found(group, total_found,
                         total_found - len(processed_movies))

            # Wait for all the async events calm down a bit
            while threading.activeCount() > 100 and not self.shuttingDown():
                log.debug('Too many threads active, waiting a few seconds')
                time.sleep(10)

        if len(processed_movies) > 0:
            log.info('Found %s movies in the folder %s',
                     (len(processed_movies), folder))
        else:
            log.debug('Found no movies in the folder %s', folder)

        return processed_movies
Example #50
0
    def urlopen(self,
                url,
                timeout=30,
                params=None,
                headers=None,
                opener=None,
                multipart=False,
                show_error=True):
        """Fetch *url* and return the response body (gunzipped if needed).

        params: dict (urlencoded, or multipart-posted when multipart=True)
            or a raw string request body; empty params -> GET.
        headers: extra request headers; browser-like defaults are filled in.
        opener: optional urllib2 opener to route the request through.
        show_error: when False, a temporarily-disabled host triggers a
            `raise` instead of returning ''.

        Rate-limits per host via self.wait(); after more than 5 consecutive
        failures a host is disabled for 15 minutes.  Re-raises IOError
        after recording the failure.
        """
        url = ss(url)

        if not headers: headers = {}
        if not params: params = {}

        # Fill in some headers
        parsed_url = urlparse(url)
        host = '%s%s' % (parsed_url.hostname,
                         (':' +
                          str(parsed_url.port) if parsed_url.port else ''))

        headers['Referer'] = headers.get('Referer',
                                         '%s://%s' % (parsed_url.scheme, host))
        headers['Host'] = headers.get('Host', host)
        headers['User-Agent'] = headers.get('User-Agent', self.user_agent)
        headers['Accept-encoding'] = headers.get('Accept-encoding', 'gzip')
        headers['Connection'] = headers.get('Connection', 'keep-alive')
        headers['Cache-Control'] = headers.get('Cache-Control', 'max-age=0')

        # Don't try for failed requests
        if self.http_failed_disabled.get(host, 0) > 0:
            # Host stays disabled for 900 seconds (15 minutes).
            if self.http_failed_disabled[host] > (time.time() - 900):
                log.info2(
                    'Disabled calls to %s for 15 minutes because so many failed requests.',
                    host)
                if not show_error:
                    # NOTE(review): bare `raise` here has no active
                    # exception, so it will itself error out — confirm
                    # whether a specific exception was intended.
                    raise
                else:
                    return ''
            else:
                # Cool-down expired: reset the failure bookkeeping.
                del self.http_failed_request[host]
                del self.http_failed_disabled[host]

        # Respect the per-host rate limit before firing the request.
        self.wait(host)
        try:

            # Make sure opener has the correct headers
            if opener:
                opener.add_headers = headers

            if multipart:
                log.info('Opening multipart url: %s, params: %s',
                         (url, [x for x in params.iterkeys()] if isinstance(
                             params, dict) else 'with data'))
                request = urllib2.Request(url, params, headers)

                if opener:
                    opener.add_handler(MultipartPostHandler())
                else:
                    cookies = cookielib.CookieJar()
                    opener = urllib2.build_opener(
                        urllib2.HTTPCookieProcessor(cookies),
                        MultipartPostHandler)

                response = opener.open(request, timeout=timeout)
            else:
                log.info('Opening url: %s, params: %s',
                         (url, [x for x in params.iterkeys()] if isinstance(
                             params, dict) else 'with data'))

                # A non-empty string is sent as-is; a dict is urlencoded.
                if isinstance(params, (str, unicode)) and len(params) > 0:
                    data = params
                else:
                    data = tryUrlencode(params) if len(params) > 0 else None

                request = urllib2.Request(url, data, headers)

                if opener:
                    response = opener.open(request, timeout=timeout)
                else:
                    response = urllib2.urlopen(request, timeout=timeout)

            # unzip if needed
            if response.info().get('Content-Encoding') == 'gzip':
                buf = StringIO(response.read())
                f = gzip.GzipFile(fileobj=buf)
                data = f.read()
                f.close()
            else:
                data = response.read()
            response.close()

            # Success: reset the consecutive-failure counter for this host.
            self.http_failed_request[host] = 0
        except IOError:
            if show_error:
                log.error('Failed opening url in %s: %s %s',
                          (self.getName(), url, traceback.format_exc(1)))

            # Save failed requests by hosts
            try:
                if not self.http_failed_request.get(host):
                    self.http_failed_request[host] = 1
                else:
                    self.http_failed_request[host] += 1

                    # Disable temporarily
                    if self.http_failed_request[host] > 5:
                        self.http_failed_disabled[host] = time.time()

            except:
                log.debug('Failed logging failed requests for %s: %s',
                          (url, traceback.format_exc()))

            raise

        self.http_last_use[host] = time.time()

        return data
Example #51
0
def md5(text):
    """Return the hexadecimal MD5 digest of *text*, byte-encoded via ss()."""
    hasher = hashlib.md5(ss(text))
    return hasher.hexdigest()
Example #52
0
            except:
                nzb_id = item['NZBID']
            statuses.append({
                'id':
                nzb_id,
                'name':
                item['NZBFilename'],
                'status':
                'completed' if item['ParStatus'] == 'SUCCESS'
                and item['ScriptStatus'] == 'SUCCESS' else 'failed',
                'original_status':
                item['ParStatus'] + ', ' + item['ScriptStatus'],
                'timeleft':
                str(timedelta(seconds=0)),
                'folder':
                ss(item['DestDir'])
            })

        return statuses

    def removeFailed(self, item):
        """Remove a failed download from NZBGet over XML-RPC.

        item: release-download dict; 'name' is used for logging.
        """

        log.info('%s failed downloading, deleting...', item['name'])

        # Build the XML-RPC endpoint URL from the configured credentials.
        url = self.url % {
            'host': self.conf('host'),
            'username': self.conf('username'),
            'password': self.conf('password')
        }

        rpc = xmlrpclib.ServerProxy(url)
        # NOTE(review): `rpc` is created but unused in the visible lines —
        # the method appears to continue beyond this chunk; confirm the
        # remainder performs the actual queue removal.
Example #53
0
    def download(self, data=None, media=None, filedata=None):
        """Send an NZB to NZBGet over XML-RPC.

        data: release info (must contain 'url' and 'name').
        media: movie info, used to build the NZB file name.
        filedata: raw NZB file contents; required.

        Returns a download-id wrapper on success, False on any failure
        (missing filedata, connection/auth problems, or rejected append).
        """
        if not media: media = {}
        if not data: data = {}

        if not filedata:
            log.error('Unable to get NZB file: %s', traceback.format_exc())
            return False

        log.info('Sending "%s" to NZBGet.', data.get('name'))

        nzb_name = ss('%s.nzb' % self.createNzbName(data, media))

        url = cleanHost(host=self.conf('host'),
                        ssl=self.conf('ssl'),
                        username=self.conf('username'),
                        password=self.conf('password')) + self.rpc
        rpc = xmlrpclib.ServerProxy(url)

        # Probe the connection first by writing a log line on the server.
        try:
            if rpc.writelog('INFO', 'CouchPotato connected to drop off %s.' %
                            nzb_name):
                log.debug('Successfully connected to NZBGet')
            else:
                log.info(
                    'Successfully connected to NZBGet, but unable to send a message'
                )
        except socket.error:
            log.error(
                'NZBGet is not responding. Please ensure that NZBGet is running and host setting is correct.'
            )
            return False
        except xmlrpclib.ProtocolError as e:
            if e.errcode == 401:
                log.error('Password is incorrect.')
            else:
                log.error('Protocol Error: %s', e)
            return False

        # NZBGet 0.x has an older append() signature without a priority
        # argument; detect it from the reported version string.
        if re.search(r"^0", rpc.version()):
            xml_response = rpc.append(nzb_name, self.conf('category'), False,
                                      standard_b64encode(filedata.strip()))
        else:
            xml_response = rpc.append(nzb_name, self.conf('category'),
                                      tryInt(self.conf('priority')), False,
                                      standard_b64encode(filedata.strip()))

        if xml_response:
            log.info('NZB sent successfully to NZBGet')
            nzb_id = md5(data['url'])  # about as unique as they come ;)
            # Tag the queued group with a couchpotato=<id> parameter so the
            # status checker can find it again later.
            couchpotato_id = "couchpotato=" + nzb_id
            groups = rpc.listgroups()
            file_id = [
                item['LastID'] for item in groups
                if item['NZBFilename'] == nzb_name
            ]
            confirmed = rpc.editqueue("GroupSetParameter", 0, couchpotato_id,
                                      file_id)
            if confirmed:
                log.debug('couchpotato parameter set in nzbget download')
            return self.downloadReturnId(nzb_id)
        else:
            log.error('NZBGet could not add %s to the queue.', nzb_name)
            return False