Example No. 1
class CacheDatabase():
    _index_key = 'date'

    def __init__(self, path):
        self.db = Database(path)
        if not self.db.exists():
            index = CacheIndex(self.db.path, self._index_key)
            self.db.create()
            self.db.add_index(index)
        else:
            self.db.open()

    def insert(self, date, record):
        self.db.insert(dict(date=date, record=record))

    def insert_all(self, records):
        now = mktime(gmtime())
        for record in records:
            self.insert(now, record)

    def since(self, date):
        return self.db.get_many(self._index_key,
                                limit=-1,
                                inclusive_start=False,
                                with_doc=True,
                                start=date)
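A minimal usage sketch of the class above, assuming CacheIndex and the time helpers (gmtime, mktime) are imported in the defining module; the path and record contents are illustrative:

from time import gmtime, mktime

cache = CacheDatabase('/tmp/cache_db')               # creates the store and its 'date' index on first run
cache.insert_all([{'title': 'a'}, {'title': 'b'}])   # stamps both records with the same timestamp

one_hour_ago = mktime(gmtime()) - 3600
for entry in cache.since(one_hour_ago):              # index entries newer than the cutoff, documents attached
    print(entry)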
Example No. 2
class CacheDatabase():
    _index_key = 'date'

    def __init__(self, path):
        self.db = Database(path)
        if not self.db.exists():
            index = CacheIndex(self.db.path, self._index_key)
            self.db.create()
            self.db.add_index(index)
        else:
            self.db.open()

    def insert(self, date, record):
        self.db.insert(dict(date=date, record=record))

    def insert_all(self, records):
        now = mktime(gmtime())
        for record in records:
            self.insert(now, record)

    def since(self, date):
        return self.db.get_many(
            self._index_key, limit=-1, inclusive_start=False, with_doc=True,
            start=date
        )
Example No. 3
    def __init__(self, name=''):
        self.name = name
        self.old_db_path = ''

        self.db_path = os.path.join(sickrage.app.data_dir, 'database',
                                    self.name)
        self.db = SuperThreadSafeDatabase(self.db_path)
Example No. 4
    def __init__(self, name=''):
        self.name = name
        self.old_db_path = ''

        self.db_path = os.path.join(sickrage.DATA_DIR, 'database', self.name)
        self.db = SuperThreadSafeDatabase(self.db_path)

        if self.db.exists():
            self.db.open()
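Both snippets reduce to CodernityDB's standard open-or-create pattern; a self-contained sketch with an illustrative path:

from CodernityDB.database import Database

db = Database('/tmp/example_db')
if db.exists():
    db.open()
else:
    db.create()

db.insert({'_t': 'example', 'value': 42})  # CodernityDB documents are plain dicts
db.close()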
Example No. 5
    def __init__(self, path=None):
        super(CodernityDbStorage, self).__init__()

        self.path = path

        self.database = SuperThreadSafeDatabase(path)

        if os.path.exists(path):
            self.database.open()
Example No. 6
    def __init__(self, path):
        self.db = Database(path)
        if not self.db.exists():
            index = CacheIndex(self.db.path, self._index_key)
            self.db.create()
            self.db.add_index(index)
        else:
            self.db.open()
Example No. 7
class srDatabase(object):
    _indexes = {}
    _migrate_list = {}

    def __init__(self, name=''):
        self.name = name
        self.old_db_path = ''

        self.db_path = os.path.join(sickrage.app.data_dir, 'database', self.name)
        self.db = SuperThreadSafeDatabase(self.db_path)

    def initialize(self):
        # Remove database folder if both exists
        if self.db.exists() and os.path.isfile(self.old_db_path):
            self.db.open()
            self.db.destroy()

        if self.db.exists():
            self.backup()
            self.db.open()
        else:
            self.db.create()

        # setup database indexes
        self.setup_indexes()

    def backup(self):
        # Backup before start and cleanup old backups
        backup_path = os.path.join(sickrage.app.data_dir, 'db_backup', self.name)
        backup_count = 5
        existing_backups = []

        if not os.path.isdir(backup_path):
            os.makedirs(backup_path)

        for root, dirs, files in os.walk(backup_path):
            # Only consider files being a direct child of the backup_path
            if root == backup_path:
                for backup_file in sorted(files):
                    ints = re.findall(r'\d+', backup_file)

                    # Delete non zip files
                    if len(ints) != 1:
                        try:
                            os.remove(os.path.join(root, backup_file))
                        except:
                            pass
                    else:
                        existing_backups.append((int(ints[0]), backup_file))
            else:
                # Delete stray directories.
                shutil.rmtree(root)

        # Remove all but the last 5
        for eb in existing_backups[:-backup_count]:
            os.remove(os.path.join(backup_path, eb[1]))

        # Create new backup
        new_backup = os.path.join(backup_path, '%s.tar.gz' % int(time.time()))
        with tarfile.open(new_backup, 'w:gz') as zipf:
            for root, dirs, files in os.walk(self.db_path):
                for zfilename in files:
                    zipf.add(os.path.join(root, zfilename),
                             arcname='database/%s/%s' % (
                                 self.name,
                                 os.path.join(root[len(self.db_path) + 1:], zfilename))
                             )

    def compact(self, try_repair=True, **kwargs):
        # Removing left over compact files
        for f in os.listdir(self.db.path):
            for x in ['_compact_buck', '_compact_stor']:
                if f[-len(x):] == x:
                    os.unlink(os.path.join(self.db.path, f))

        try:
            start = time.time()
            size = float(self.db.get_db_details().get('size', 0))
            sickrage.app.log.info(
                'Compacting {} database, current size: {}MB'.format(self.name, round(size / 1048576, 2)))

            self.db.compact()

            new_size = float(self.db.get_db_details().get('size', 0))
            sickrage.app.log.info(
                'Done compacting {} database in {}s, new size: {}MB, saved: {}MB'.format(
                    self.name, round(time.time() - start, 2),
                    round(new_size / 1048576, 2), round((size - new_size) / 1048576, 2))
            )
        except (IndexException, AttributeError, TypeError) as e:
            if try_repair:
                sickrage.app.log.debug('Something wrong with indexes, trying repair')

                # Remove all indexes
                old_indexes = self._indexes.keys()
                for index_name in old_indexes:
                    try:
                        self.db.destroy_index(index_name)
                    except IndexNotFoundException:
                        pass
                    except:
                        sickrage.app.log.debug('Failed removing old index %s', index_name)

                # Add them again
                for index_name in self._indexes:
                    try:
                        self.db.add_index(self._indexes[index_name](self.db.path, index_name))
                        self.db.reindex_index(index_name)
                    except IndexConflict:
                        pass
                    except:
                        sickrage.app.log.debug('Failed adding index %s', index_name)
                        raise

                self.compact(try_repair=False)
            else:
                sickrage.app.log.debug('Failed compact: {}'.format(traceback.format_exc()))
        except:
            sickrage.app.log.debug('Failed compact: {}'.format(traceback.format_exc()))

    def setup_indexes(self):
        # setup database indexes
        for index_name in self._indexes:
            try:
                # Make sure store and bucket don't exist
                exists = []
                for x in ['buck', 'stor']:
                    full_path = os.path.join(self.db.path, '%s_%s' % (index_name, x))
                    if os.path.exists(full_path):
                        exists.append(full_path)

                if index_name not in self.db.indexes_names:
                    # Remove existing buckets if index isn't there
                    for x in exists:
                        os.unlink(x)

                    self.db.add_index(self._indexes[index_name](self.db.path, index_name))
                    self.db.reindex_index(index_name)
                else:
                    # Previous info
                    previous_version = self.db.indexes_names[index_name]._version
                    current_version = self._indexes[index_name]._version

                    self.check_versions(index_name, current_version, previous_version)
            except:
                sickrage.app.log.debug('Failed adding index {}'.format(index_name))

    def check_versions(self, index_name, current_version, previous_version):
        # Only edit index if versions are different
        if previous_version < current_version:
            self.db.destroy_index(self.db.indexes_names[index_name])
            self.db.add_index(self._indexes[index_name](self.db.path, index_name))
            self.db.reindex_index(index_name)

    def close(self):
        self.db.close()

    def upgrade(self):
        pass

    def cleanup(self):
        pass

    @property
    def version(self):
        try:
            dbData = list(self.all('version'))[-1]
        except IndexError:
            dbData = {
                '_t': 'version',
                'database_version': 1
            }

            dbData.update(self.insert(dbData))

        return dbData['database_version']

    @property
    def opened(self):
        return self.db.opened

    def check_integrity(self):
        for index_name in self._indexes:
            sickrage.app.log.debug('Checking data integrity for index {}'.format(index_name))

            data = []
            failed = False

            # check integrity of index data
            for x in self.db.all(index_name):
                try:
                    data += [self.get('id', x.get('_id'))]
                except Exception:
                    failed = True

            # check if we failed integrity check, if so then destroy index
            if failed and index_name in self.db.indexes_names:
                self.db.destroy_index(self.db.indexes_names[index_name])

            # check if index exists, if not then add it
            if index_name not in self.db.indexes_names:
                self.db.add_index(self._indexes[index_name](self.db.path, index_name))

            # rebuild index if failed
            if failed:
                sickrage.app.log.debug('Failed data integrity check, rebuilding index {}'.format(index_name))
                for x in data:
                    del x['_id'], x['_rev']
                    self.insert(x)

            # cleanup
            del data

    def migrate(self):
        if os.path.isfile(self.old_db_path):
            sickrage.app.log.info('=' * 30)
            sickrage.app.log.info('Migrating %s database, please wait...', self.name)
            migrate_start = time.time()

            import sqlite3
            conn = sqlite3.connect(self.old_db_path)
            conn.text_factory = lambda x: (x.decode('utf-8', 'ignore'))

            migrate_data = {}
            rename_old = False

            try:
                c = conn.cursor()

                for ml in self._migrate_list:
                    migrate_data[ml] = {}
                    rows = self._migrate_list[ml]

                    try:
                        c.execute('SELECT {} FROM `{}`'.format('`' + '`,`'.join(rows) + '`', ml))
                    except:
                        # ignore faulty destination_id database
                        rename_old = True
                        raise

                    for p in c.fetchall():
                        columns = {}
                        for row in self._migrate_list[ml]:
                            columns[row] = p[rows.index(row)]

                        if not migrate_data[ml].get(p[0]):
                            migrate_data[ml][p[0]] = columns
                        else:
                            if not isinstance(migrate_data[ml][p[0]], list):
                                migrate_data[ml][p[0]] = [migrate_data[ml][p[0]]]
                            migrate_data[ml][p[0]].append(columns)

                sickrage.app.log.info('Getting data took %s', (time.time() - migrate_start))

                if not self.db.opened:
                    return

                for t_name in migrate_data:
                    t_data = migrate_data.get(t_name, {})
                    sickrage.app.log.info('Importing %s %s' % (len(t_data), t_name))
                    for k, v in t_data.items():
                        if isinstance(v, list):
                            for d in v:
                                d.update({'_t': t_name})
                                self.insert(d)
                        else:
                            v.update({'_t': t_name})
                            self.insert(v)

                sickrage.app.log.info('Total migration took %s', (time.time() - migrate_start))
                sickrage.app.log.info('=' * 30)

                rename_old = True
            except OperationalError:
                sickrage.app.log.debug('Migrating from unsupported/corrupt %s database version', self.name)
                rename_old = True
            except:
                sickrage.app.log.debug('Migration of %s database failed', self.name)
            finally:
                conn.close()

            # rename old database
            if rename_old:
                random = randomString()
                sickrage.app.log.info('Renaming old database to %s.%s_old' % (self.old_db_path, random))
                os.rename(self.old_db_path, '{}.{}_old'.format(self.old_db_path, random))

                if os.path.isfile(self.old_db_path + '-wal'):
                    os.rename(self.old_db_path + '-wal', '{}-wal.{}_old'.format(self.old_db_path, random))
                if os.path.isfile(self.old_db_path + '-shm'):
                    os.rename(self.old_db_path + '-shm', '{}-shm.{}_old'.format(self.old_db_path, random))

    def delete_corrupted(self, _id, traceback_error=''):
        try:
            sickrage.app.log.debug('Deleting corrupted document "{}": {}'.format(_id, traceback_error))
            corrupted = self.db.get('id', _id, with_storage=False)
            self.db._delete_id_index(corrupted.get('_id'), corrupted.get('_rev'), None)
        except:
            sickrage.app.log.debug('Failed deleting corrupted: {}'.format(traceback.format_exc()))

    def all(self, *args, **kwargs):
        with_doc = kwargs.pop('with_doc', True)
        for data in self.db.all(*args, **kwargs):
            if with_doc:
                try:
                    doc = self.db.get('id', data['_id'])
                    yield doc
                except (RecordDeleted, RecordNotFound):
                    sickrage.app.log.debug('Record not found, skipping: {}'.format(data['_id']))
                except (ValueError, EOFError):
                    self.delete_corrupted(data.get('_id'), traceback_error=traceback.format_exc(0))
            else:
                yield data

    def get_many(self, *args, **kwargs):
        with_doc = kwargs.pop('with_doc', True)
        for data in self.db.get_many(*args, **kwargs):
            if with_doc:
                try:
                    doc = self.db.get('id', data['_id'])
                    yield doc
                except (RecordDeleted, RecordNotFound):
                    sickrage.app.log.debug('Record not found, skipping: {}'.format(data['_id']))
                except (ValueError, EOFError):
                    self.delete_corrupted(data.get('_id'), traceback_error=traceback.format_exc(0))
            else:
                yield data

    def get(self, *args, **kwargs):
        try:
            x = self.db.get(with_doc=kwargs.pop('with_doc', True), *args, **kwargs)
            return x.get('doc', x)
        except (RecordDeleted, RecordNotFound):
            pass

    def delete(self, *args):
        return self.db.delete(*args)

    def update(self, *args):
        return self.db.update(*args)

    def insert(self, *args):
        return self.db.insert(*args)
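A hedged sketch of how a concrete database might build on srDatabase: the index follows CodernityDB's HashIndex contract, and all names below (ShowIndex, ShowsDatabase, indexer_id) are illustrative rather than taken from the original project:

from CodernityDB.hash_index import HashIndex


class ShowIndex(HashIndex):
    _version = 1

    def __init__(self, *args, **kwargs):
        kwargs['key_format'] = 'I'  # unsigned integer keys
        super(ShowIndex, self).__init__(*args, **kwargs)

    def make_key_value(self, data):
        # Index only documents tagged as shows
        if data.get('_t') == 'show' and 'indexer_id' in data:
            return data['indexer_id'], None

    def make_key(self, key):
        return key


class ShowsDatabase(srDatabase):
    _indexes = {'shows': ShowIndex}

    def __init__(self):
        super(ShowsDatabase, self).__init__('shows')

# ShowsDatabase().initialize() would then create the store and the 'shows' index.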
Example No. 8
def runCouchPotato(options,
                   base_path,
                   args,
                   data_dir=None,
                   log_dir=None,
                   Env=None,
                   desktop=None):

    try:
        locale.setlocale(locale.LC_ALL, "")
        encoding = locale.getpreferredencoding()
    except (locale.Error, IOError):
        encoding = None

    # for OSes that are poorly configured I'll just force UTF-8
    if not encoding or encoding in ('ANSI_X3.4-1968', 'US-ASCII', 'ASCII'):
        encoding = 'UTF-8'

    Env.set('encoding', encoding)

    # Do db stuff
    db_path = sp(os.path.join(data_dir, 'database'))
    old_db_path = os.path.join(data_dir, 'couchpotato.db')

    # Remove database folder if both exists
    if os.path.isdir(db_path) and os.path.isfile(old_db_path):
        db = SuperThreadSafeDatabase(db_path)
        db.open()
        db.destroy()

    # Check if database exists
    db = SuperThreadSafeDatabase(db_path)
    db_exists = db.exists()
    if db_exists:

        # Backup before start and cleanup old backups
        backup_path = sp(os.path.join(data_dir, 'db_backup'))
        backup_count = 5
        existing_backups = []
        if not os.path.isdir(backup_path): os.makedirs(backup_path)

        for root, dirs, files in os.walk(backup_path):
            # Only consider files being a direct child of the backup_path
            if root == backup_path:
                for backup_file in sorted(files):
                    ints = re.findall(r'\d+', backup_file)

                    # Delete non zip files
                    if len(ints) != 1:
                        os.remove(os.path.join(root, backup_file))
                    else:
                        existing_backups.append((int(ints[0]), backup_file))
            else:
                # Delete stray directories.
                shutil.rmtree(root)

        # Remove all but the last 5
        for eb in existing_backups[:-backup_count]:
            os.remove(os.path.join(backup_path, eb[1]))

        # Create new backup
        new_backup = sp(
            os.path.join(backup_path, '%s.tar.gz' % int(time.time())))
        zipf = tarfile.open(new_backup, 'w:gz')
        for root, dirs, files in os.walk(db_path):
            for zfilename in files:
                zipf.add(os.path.join(root, zfilename),
                         arcname='database/%s' %
                         os.path.join(root[len(db_path) + 1:], zfilename))
        zipf.close()

        # Open last
        db.open()

    else:
        db.create()

    # Force creation of cachedir
    log_dir = sp(log_dir)
    cache_dir = sp(os.path.join(data_dir, 'cache'))
    python_cache = sp(os.path.join(cache_dir, 'python'))

    if not os.path.exists(cache_dir):
        os.mkdir(cache_dir)
    if not os.path.exists(python_cache):
        os.mkdir(python_cache)

    # Register environment settings
    Env.set('app_dir', sp(base_path))
    Env.set('data_dir', sp(data_dir))
    Env.set('log_path', sp(os.path.join(log_dir, 'CouchPotato.log')))
    Env.set('db', db)
    Env.set('http_opener', requests.Session())
    Env.set('cache_dir', cache_dir)
    Env.set('cache', FileSystemCache(python_cache))
    Env.set('console_log', options.console_log)
    Env.set('quiet', options.quiet)
    Env.set('desktop', desktop)
    Env.set('daemonized', options.daemon)
    Env.set('args', args)
    Env.set('options', options)

    # Determine debug
    debug = options.debug or Env.setting('debug', default=False, type='bool')
    Env.set('debug', debug)

    # Development
    development = Env.setting('development', default=False, type='bool')
    Env.set('dev', development)

    # Disable logging for some modules
    for logger_name in [
            'enzyme', 'guessit', 'subliminal', 'apscheduler', 'tornado',
            'requests'
    ]:
        logging.getLogger(logger_name).setLevel(logging.ERROR)

    for logger_name in ['gntp']:
        logging.getLogger(logger_name).setLevel(logging.WARNING)

    # Disable SSL warning
    disable_warnings()

    # Use reloader
    reloader = debug is True and development and not Env.get(
        'desktop') and not options.daemon

    # Logger
    logger = logging.getLogger()
    formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s',
                                  '%m-%d %H:%M:%S')
    level = logging.DEBUG if debug else logging.INFO
    logger.setLevel(level)
    logging.addLevelName(19, 'INFO')

    # To screen
    if (debug or
            options.console_log) and not options.quiet and not options.daemon:
        hdlr = logging.StreamHandler(sys.stderr)
        hdlr.setFormatter(formatter)
        logger.addHandler(hdlr)

    # To file
    hdlr2 = handlers.RotatingFileHandler(Env.get('log_path'),
                                         'a',
                                         500000,
                                         10,
                                         encoding=Env.get('encoding'))
    hdlr2.setFormatter(formatter)
    logger.addHandler(hdlr2)

    # Start logging & enable colors
    # noinspection PyUnresolvedReferences
    import color_logs
    from couchpotato.core.logger import CPLog
    log = CPLog(__name__)
    log.debug('Started with options %s', options)

    # Check available space
    try:
        total_space, available_space = getFreeSpace(data_dir)
        if available_space < 100:
            log.error(
                'Shutting down as CP needs some space to work. You\'ll get corrupted data otherwise. Only %sMB left',
                available_space)
            return
    except:
        log.error('Failed getting diskspace: %s', traceback.format_exc())

    def customwarn(message, category, filename, lineno, file=None, line=None):
        log.warning('%s %s %s line:%s', (category, message, filename, lineno))

    warnings.showwarning = customwarn

    # Create app
    from couchpotato import WebHandler
    web_base = ('/' + Env.setting('url_base').lstrip('/') +
                '/') if Env.setting('url_base') else '/'
    Env.set('web_base', web_base)

    api_key = Env.setting('api_key')
    if not api_key:
        api_key = uuid4().hex
        Env.setting('api_key', value=api_key)

    api_base = r'%sapi/%s/' % (web_base, api_key)
    Env.set('api_base', api_base)

    # Basic config
    host = Env.setting('host', default='0.0.0.0')
    # app.debug = development
    config = {
        'use_reloader': reloader,
        'port': tryInt(Env.setting('port', default=5050)),
        'host': host if host and len(host) > 0 else '0.0.0.0',
        'ssl_cert': Env.setting('ssl_cert', default=None),
        'ssl_key': Env.setting('ssl_key', default=None),
    }

    # Load the app
    application = Application(
        [],
        log_function=lambda x: None,
        debug=config['use_reloader'],
        gzip=True,
        cookie_secret=api_key,
        login_url='%slogin/' % web_base,
    )
    Env.set('app', application)

    # Request handlers
    application.add_handlers(
        ".*$",
        [
            (r'%snonblock/(.*)(/?)' % api_base, NonBlockHandler),

            # API handlers
            (r'%s(.*)(/?)' % api_base, ApiHandler),  # Main API handler
            (r'%sgetkey(/?)' % web_base, KeyHandler),  # Get API key
            (r'%s' % api_base, RedirectHandler, {
                "url": web_base + 'docs/'
            }),  # API docs

            # Login handlers
            (r'%slogin(/?)' % web_base, LoginHandler),
            (r'%slogout(/?)' % web_base, LogoutHandler),

            # Catch all webhandlers
            (r'%s(.*)(/?)' % web_base, WebHandler),
            (r'(.*)', WebHandler),
        ])

    # Static paths
    static_path = '%sstatic/' % web_base
    for dir_name in ['fonts', 'images', 'scripts', 'style']:
        application.add_handlers(
            ".*$",
            [('%s%s/(.*)' % (static_path, dir_name), StaticFileHandler, {
                'path':
                sp(os.path.join(base_path, 'couchpotato', 'static', dir_name))
            })])
    Env.set('static_path', static_path)

    # Load configs & plugins
    loader = Env.get('loader')
    loader.preload(root=sp(base_path))
    loader.run()

    # Fill database with needed stuff
    fireEvent('database.setup')
    if not db_exists:
        fireEvent('app.initialize', in_order=True)
    fireEvent('app.migrate')

    # Go go go!
    from tornado.ioloop import IOLoop
    from tornado.autoreload import add_reload_hook
    loop = IOLoop.current()

    # Reload hook
    def reload_hook():
        fireEvent('app.shutdown')

    add_reload_hook(reload_hook)

    # Some logging and fire load event
    try:
        log.info('Starting server on port %(port)s', config)
    except:
        pass
    fireEventAsync('app.load')

    ssl_options = None
    if config['ssl_cert'] and config['ssl_key']:
        ssl_options = {
            'certfile': config['ssl_cert'],
            'keyfile': config['ssl_key'],
        }

    server = HTTPServer(application,
                        no_keep_alive=True,
                        ssl_options=ssl_options)

    try_restart = True
    restart_tries = 5

    while try_restart:
        try:
            server.listen(config['port'], config['host'])
            loop.start()
            server.close_all_connections()
            server.stop()
            loop.close(all_fds=True)
        except Exception as e:
            log.error('Failed starting: %s', traceback.format_exc())
            try:
                nr, msg = e.args
                if nr == 48:
                    log.info(
                        'Port (%s) needed for CouchPotato is already in use, try %s more time after few seconds',
                        (config.get('port'), restart_tries))
                    time.sleep(1)
                    restart_tries -= 1

                    if restart_tries > 0:
                        continue
                    else:
                        return
            except ValueError:
                return
            except:
                pass

            raise

        try_restart = False
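For context, a hedged sketch of an entry point driving runCouchPotato; the option attributes mirror what the function reads (console_log, quiet, daemon, debug), while the paths are purely illustrative:

from argparse import Namespace

from couchpotato.environment import Env

options = Namespace(console_log=True, quiet=False, daemon=False, debug=True)
runCouchPotato(options,
               base_path='/opt/couchpotato',
               args=[],
               data_dir='/home/user/.couchpotato',
               log_dir='/home/user/.couchpotato/logs',
               Env=Env)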
Example No. 9
class CodernityDbStorage(ProviderStorage, Plugin):
    __key__ = 'codernitydb'

    def __init__(self, path=None):
        super(CodernityDbStorage, self).__init__()

        self.path = path

        self.database = SuperThreadSafeDatabase(path)

        if os.path.exists(path):
            self.database.open()

    #
    # Provider methods
    #

    def create(self, source, target):
        if os.path.exists(self.path):
            return True

        # Create database
        self.database.create()

        # Add indices
        self.database.add_index(
            CollectionKeyIndex(self.database.path, 'collection_key'))
        self.database.add_index(
            MetadataKeyIndex(self.database.path, 'metadata_key'))
        self.database.add_index(
            MetadataCollectionIndex(self.database.path, 'metadata_collection'))
        self.database.add_index(ItemKeyIndex(self.database.path, 'item_key'))

        return True

    def open_database(self, source, target, database=None):
        return ModelRegistry['Database'].load(
            DatabaseCodernityDbStorage.open(self, source, target, database),
            source, target)

    #
    # Collection methods
    #

    def has_collection(self, source, target):
        try:
            self.database.get('collection_key', (source, target))
            return True
        except RecordNotFound:
            pass

        return False

    def get_collection_version(self, source, target):
        try:
            item = self.database.get('collection_key', (source, target),
                                     with_doc=True)

            if not item or 'doc' not in item:
                return None

            return Version(item['doc'].get('version'))
        except RecordNotFound:
            pass

        return None

    @Elapsed.track
    def update_collection(self, source, target, version):
        # Retrieve current item
        try:
            current = self.database.get('collection_key', (source, target),
                                        with_doc=True)

            if 'doc' in current:
                current = current['doc']
            else:
                current = None
        except RecordNotFound:
            current = None

        # Build collection metadata
        item = {
            '_': {
                't': 'collection',
                'c': {
                    's': source,
                    't': target
                }
            },

            # Collection attributes
            'version': str(version)
        }

        # Add current item identifier
        if current and '_id' in current and '_rev' in current:
            item['_id'] = current['_id']
            item['_rev'] = current['_rev']

        # Update database
        if current:
            # Update existing item
            try:
                self.database.update(item)
            except Exception as ex:
                log.warn('Unable to update collection: %s', ex, exc_info=True)
                return False

            log.debug('[%s -> %s] Updated collection', source, target)
            return True

        # Insert new item
        try:
            self.database.insert(item)
        except Exception as ex:
            log.warn('Unable to insert collection: %s', ex, exc_info=True)
            return False
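A hedged usage sketch of the storage plugin; the source/target names and path are illustrative, and Version is assumed to be the same class the module already imports:

storage = CodernityDbStorage('/tmp/provider_storage')
storage.create('trakt', 'movies')                        # first run: creates the database and its indices
storage.update_collection('trakt', 'movies', Version('1.0.0'))
print(storage.get_collection_version('trakt', 'movies'))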
Example No. 10
    def __init__(self, name=''):
        self.name = name
        self.old_db_path = ''

        self.db_path = os.path.join(sickrage.app.data_dir, 'database', self.name)
        self.db = SuperThreadSafeDatabase(self.db_path)
Example No. 11
class srDatabase(object):
    _indexes = {}
    _migrate_list = {}

    def __init__(self, name=''):
        self.name = name
        self.old_db_path = ''

        self.db_path = os.path.join(sickrage.app.data_dir, 'database', self.name)
        self.db = SuperThreadSafeDatabase(self.db_path)

    def initialize(self):
        # Remove database folder if both exists
        if self.db.exists() and os.path.isfile(self.old_db_path):
            self.db.open()
            self.db.destroy()

        if self.db.exists():
            # Backup before start and cleanup old backups
            backup_path = os.path.join(sickrage.app.data_dir, 'db_backup', self.name)
            backup_count = 5
            existing_backups = []
            if not os.path.isdir(backup_path): os.makedirs(backup_path)

            for root, dirs, files in os.walk(backup_path):
                # Only consider files being a direct child of the backup_path
                if root == backup_path:
                    for backup_file in sorted(files):
                        ints = re.findall(r'\d+', backup_file)

                        # Delete non zip files
                        if len(ints) != 1:
                            try:
                                os.remove(os.path.join(root, backup_file))
                            except:
                                pass
                        else:
                            existing_backups.append((int(ints[0]), backup_file))
                else:
                    # Delete stray directories.
                    shutil.rmtree(root)

            # Remove all but the last 5
            for eb in existing_backups[:-backup_count]:
                os.remove(os.path.join(backup_path, eb[1]))

            # Create new backup
            new_backup = os.path.join(backup_path, '%s.tar.gz' % int(time.time()))
            with tarfile.open(new_backup, 'w:gz') as zipf:
                for root, dirs, files in os.walk(self.db_path):
                    for zfilename in files:
                        zipf.add(os.path.join(root, zfilename),
                                 arcname='database/%s/%s' % (
                                     self.name,
                                     os.path.join(root[len(self.db_path) + 1:], zfilename))
                                 )

            self.db.open()
        else:
            self.db.create()

        # setup database indexes
        self.setupIndexes()

    def compact(self, try_repair=True, **kwargs):
        # Removing left over compact files
        for f in os.listdir(self.db.path):
            for x in ['_compact_buck', '_compact_stor']:
                if f[-len(x):] == x:
                    os.unlink(os.path.join(self.db.path, f))

        try:
            start = time.time()
            size = float(self.db.get_db_details().get('size', 0))
            sickrage.app.log.info(
                'Compacting {} database, current size: {}MB'.format(self.name, round(size / 1048576, 2)))

            self.db.compact()

            new_size = float(self.db.get_db_details().get('size', 0))
            sickrage.app.log.info(
                'Done compacting {} database in {}s, new size: {}MB, saved: {}MB'.format(
                    self.name, round(time.time() - start, 2),
                    round(new_size / 1048576, 2), round((size - new_size) / 1048576, 2))
            )
        except (IndexException, AttributeError, TypeError) as e:
            if try_repair:
                sickrage.app.log.debug('Something wrong with indexes, trying repair')

                # Remove all indexes
                old_indexes = self._indexes.keys()
                for index_name in old_indexes:
                    try:
                        self.db.destroy_index(index_name)
                    except IndexNotFoundException:
                        pass
                    except:
                        sickrage.app.log.debug('Failed removing old index %s', index_name)

                # Add them again
                for index_name in self._indexes:
                    try:
                        self.db.add_index(self._indexes[index_name](self.db.path, index_name))
                        self.db.reindex_index(index_name)
                    except IndexConflict:
                        pass
                    except:
                        sickrage.app.log.debug('Failed adding index %s', index_name)
                        raise

                self.compact(try_repair=False)
            else:
                sickrage.app.log.debug('Failed compact: {}'.format(traceback.format_exc()))
        except:
            sickrage.app.log.debug('Failed compact: {}'.format(traceback.format_exc()))

    def setupIndexes(self):
        # setup database indexes
        for index_name in self._indexes:
            try:
                # Make sure store and bucket don't exist
                exists = []
                for x in ['buck', 'stor']:
                    full_path = os.path.join(self.db.path, '%s_%s' % (index_name, x))
                    if os.path.exists(full_path):
                        exists.append(full_path)

                if index_name not in self.db.indexes_names:
                    # Remove existing buckets if index isn't there
                    for x in exists:
                        os.unlink(x)

                    self.db.add_index(self._indexes[index_name](self.db.path, index_name))
                    self.db.reindex_index(index_name)
                else:
                    # Previous info
                    previous_version = self.db.indexes_names[index_name]._version
                    current_version = self._indexes[index_name]._version

                    self.check_versions(index_name, current_version, previous_version)
            except:
                sickrage.app.log.debug('Failed adding index {}'.format(index_name))

    def check_versions(self, index_name, current_version, previous_version):
        # Only edit index if versions are different
        if previous_version < current_version:
            self.db.destroy_index(self.db.indexes_names[index_name])
            self.db.add_index(self._indexes[index_name](self.db.path, index_name))
            self.db.reindex_index(index_name)

    def close(self):
        self.db.close()

    def cleanup(self):
        pass

    @property
    def opened(self):
        return self.db.opened

    def check_integrity(self):
        for index_name in self._indexes:
            try:
                for x in self.db.all(index_name):
                    try:
                        self.db.get('id', x.get('_id'), with_doc=True)
                    except (ValueError, TypeError) as e:
                        self.db.delete(self.db.get(index_name, x.get('key'), with_doc=True)['doc'])
            except Exception as e:
                if index_name in self.db.indexes_names:
                    self.db.destroy_index(self.db.indexes_names[index_name])

    def migrate(self):
        if os.path.isfile(self.old_db_path):
            sickrage.app.log.info('=' * 30)
            sickrage.app.log.info('Migrating %s database, please wait...', self.name)
            migrate_start = time.time()

            import sqlite3
            conn = sqlite3.connect(self.old_db_path)
            conn.text_factory = lambda x: (x.decode('utf-8', 'ignore'))

            migrate_data = {}
            rename_old = False

            try:
                c = conn.cursor()

                for ml in self._migrate_list:
                    migrate_data[ml] = {}
                    rows = self._migrate_list[ml]

                    try:
                        c.execute('SELECT {} FROM `{}`'.format('`' + '`,`'.join(rows) + '`', ml))
                    except:
                        # ignore faulty destination_id database
                        rename_old = True
                        raise

                    for p in c.fetchall():
                        columns = {}
                        for row in self._migrate_list[ml]:
                            columns[row] = p[rows.index(row)]

                        if not migrate_data[ml].get(p[0]):
                            migrate_data[ml][p[0]] = columns
                        else:
                            if not isinstance(migrate_data[ml][p[0]], list):
                                migrate_data[ml][p[0]] = [migrate_data[ml][p[0]]]
                            migrate_data[ml][p[0]].append(columns)

                sickrage.app.log.info('Getting data took %s', (time.time() - migrate_start))

                if not self.db.opened:
                    return

                for t_name in migrate_data:
                    t_data = migrate_data.get(t_name, {})
                    sickrage.app.log.info('Importing %s %s' % (len(t_data), t_name))
                    for k, v in t_data.items():
                        if isinstance(v, list):
                            for d in v:
                                d.update({'_t': t_name})
                                self.db.insert(d)
                        else:
                            v.update({'_t': t_name})
                            self.db.insert(v)

                sickrage.app.log.info('Total migration took %s', (time.time() - migrate_start))
                sickrage.app.log.info('=' * 30)

                rename_old = True
            except OperationalError:
                sickrage.app.log.debug('Migrating from unsupported/corrupt %s database version', self.name)
                rename_old = True
            except:
                sickrage.app.log.debug('Migration of %s database failed', self.name)
            finally:
                conn.close()

            # rename old database
            if rename_old:
                random = randomString()
                sickrage.app.log.info('Renaming old database to %s.%s_old' % (self.old_db_path, random))
                os.rename(self.old_db_path, '{}.{}_old'.format(self.old_db_path, random))

                if os.path.isfile(self.old_db_path + '-wal'):
                    os.rename(self.old_db_path + '-wal', '{}-wal.{}_old'.format(self.old_db_path, random))
                if os.path.isfile(self.old_db_path + '-shm'):
                    os.rename(self.old_db_path + '-shm', '{}-shm.{}_old'.format(self.old_db_path, random))
Example No. 12
def runCouchPotato(options, base_path, args, data_dir = None, log_dir = None, Env = None, desktop = None):

    try:
        locale.setlocale(locale.LC_ALL, "")
        encoding = locale.getpreferredencoding()
    except (locale.Error, IOError):
        encoding = None

    # for OSes that are poorly configured I'll just force UTF-8
    if not encoding or encoding in ('ANSI_X3.4-1968', 'US-ASCII', 'ASCII'):
        encoding = 'UTF-8'

    Env.set('encoding', encoding)

    # Do db stuff
    db_path = sp(os.path.join(data_dir, 'database'))

    # Check if database exists
    db = SuperThreadSafeDatabase(db_path)
    db_exists = db.exists()
    if db_exists:

        # Backup before start and cleanup old backups
        backup_path = sp(os.path.join(data_dir, 'db_backup'))
        backup_count = 5
        existing_backups = []
        if not os.path.isdir(backup_path): os.makedirs(backup_path)

        for root, dirs, files in os.walk(backup_path):
            for backup_file in sorted(files):
                ints = re.findall(r'\d+', backup_file)

                # Delete non zip files
                if len(ints) != 1:
                    os.remove(os.path.join(backup_path, backup_file))
                else:
                    existing_backups.append((int(ints[0]), backup_file))

        # Remove all but the last 5
        for eb in existing_backups[:-backup_count]:
            os.remove(os.path.join(backup_path, eb[1]))

        # Create new backup
        new_backup = sp(os.path.join(backup_path, '%s.tar.gz' % int(time.time())))
        zipf = tarfile.open(new_backup, 'w:gz')
        for root, dirs, files in os.walk(db_path):
            for zfilename in files:
                zipf.add(os.path.join(root, zfilename), arcname = 'database/%s' % os.path.join(root[len(db_path) + 1:], zfilename))
        zipf.close()

        # Open last
        db.open()

    else:
        db.create()

    # Force creation of cachedir
    log_dir = sp(log_dir)
    cache_dir = sp(os.path.join(data_dir, 'cache'))
    python_cache = sp(os.path.join(cache_dir, 'python'))

    if not os.path.exists(cache_dir):
        os.mkdir(cache_dir)
    if not os.path.exists(python_cache):
        os.mkdir(python_cache)

    # Register environment settings
    Env.set('app_dir', sp(base_path))
    Env.set('data_dir', sp(data_dir))
    Env.set('log_path', sp(os.path.join(log_dir, 'CouchPotato.log')))
    Env.set('db', db)
    Env.set('cache_dir', cache_dir)
    Env.set('cache', FileSystemCache(python_cache))
    Env.set('console_log', options.console_log)
    Env.set('quiet', options.quiet)
    Env.set('desktop', desktop)
    Env.set('daemonized', options.daemon)
    Env.set('args', args)
    Env.set('options', options)

    # Determine debug
    debug = options.debug or Env.setting('debug', default = False, type = 'bool')
    Env.set('debug', debug)

    # Development
    development = Env.setting('development', default = False, type = 'bool')
    Env.set('dev', development)

    # Disable logging for some modules
    for logger_name in ['enzyme', 'guessit', 'subliminal', 'apscheduler', 'tornado', 'requests']:
        logging.getLogger(logger_name).setLevel(logging.ERROR)

    for logger_name in ['gntp']:
        logging.getLogger(logger_name).setLevel(logging.WARNING)

    # Use reloader
    reloader = debug is True and development and not Env.get('desktop') and not options.daemon

    # Logger
    logger = logging.getLogger()
    formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s', '%m-%d %H:%M:%S')
    level = logging.DEBUG if debug else logging.INFO
    logger.setLevel(level)
    logging.addLevelName(19, 'INFO')

    # To screen
    if (debug or options.console_log) and not options.quiet and not options.daemon:
        hdlr = logging.StreamHandler(sys.stderr)
        hdlr.setFormatter(formatter)
        logger.addHandler(hdlr)

    # To file
    hdlr2 = handlers.RotatingFileHandler(Env.get('log_path'), 'a', 500000, 10, encoding = Env.get('encoding'))
    hdlr2.setFormatter(formatter)
    logger.addHandler(hdlr2)

    # Start logging & enable colors
    # noinspection PyUnresolvedReferences
    import color_logs
    from couchpotato.core.logger import CPLog
    log = CPLog(__name__)
    log.debug('Started with options %s', options)

    def customwarn(message, category, filename, lineno, file = None, line = None):
        log.warning('%s %s %s line:%s', (category, message, filename, lineno))
    warnings.showwarning = customwarn

    # Create app
    from couchpotato import WebHandler
    web_base = ('/' + Env.setting('url_base').lstrip('/') + '/') if Env.setting('url_base') else '/'
    Env.set('web_base', web_base)

    api_key = Env.setting('api_key')
    if not api_key:
        api_key = uuid4().hex
        Env.setting('api_key', value = api_key)

    api_base = r'%sapi/%s/' % (web_base, api_key)
    Env.set('api_base', api_base)

    # Basic config
    host = Env.setting('host', default = '0.0.0.0')
    # app.debug = development
    config = {
        'use_reloader': reloader,
        'port': tryInt(Env.setting('port', default = 5050)),
        'host': host if host and len(host) > 0 else '0.0.0.0',
        'ssl_cert': Env.setting('ssl_cert', default = None),
        'ssl_key': Env.setting('ssl_key', default = None),
    }

    # Load the app
    application = Application(
        [],
        log_function = lambda x: None,
        debug = config['use_reloader'],
        gzip = True,
        cookie_secret = api_key,
        login_url = '%slogin/' % web_base,
    )
    Env.set('app', application)

    # Request handlers
    application.add_handlers(".*$", [
        (r'%snonblock/(.*)(/?)' % api_base, NonBlockHandler),

        # API handlers
        (r'%s(.*)(/?)' % api_base, ApiHandler),  # Main API handler
        (r'%sgetkey(/?)' % web_base, KeyHandler),  # Get API key
        (r'%s' % api_base, RedirectHandler, {"url": web_base + 'docs/'}),  # API docs

        # Login handlers
        (r'%slogin(/?)' % web_base, LoginHandler),
        (r'%slogout(/?)' % web_base, LogoutHandler),

        # Catch all webhandlers
        (r'%s(.*)(/?)' % web_base, WebHandler),
        (r'(.*)', WebHandler),
    ])

    # Static paths
    static_path = '%sstatic/' % web_base
    for dir_name in ['fonts', 'images', 'scripts', 'style']:
        application.add_handlers(".*$", [
            ('%s%s/(.*)' % (static_path, dir_name), StaticFileHandler, {'path': sp(os.path.join(base_path, 'couchpotato', 'static', dir_name))})
        ])
    Env.set('static_path', static_path)

    # Load configs & plugins
    loader = Env.get('loader')
    loader.preload(root = sp(base_path))
    loader.run()

    # Fill database with needed stuff
    fireEvent('database.setup')
    if not db_exists:
        fireEvent('app.initialize', in_order = True)
    fireEvent('app.migrate')

    # Go go go!
    from tornado.ioloop import IOLoop
    from tornado.autoreload import add_reload_hook
    loop = IOLoop.current()

    # Reload hook
    def test():
        fireEvent('app.shutdown')
    add_reload_hook(test)

    # Some logging and fire load event
    try: log.info('Starting server on port %(port)s', config)
    except: pass
    fireEventAsync('app.load')

    if config['ssl_cert'] and config['ssl_key']:
        server = HTTPServer(application, no_keep_alive = True, ssl_options = {
            'certfile': config['ssl_cert'],
            'keyfile': config['ssl_key'],
        })
    else:
        server = HTTPServer(application, no_keep_alive = True)

    try_restart = True
    restart_tries = 5

    while try_restart:
        try:
            server.listen(config['port'], config['host'])
            loop.start()
        except Exception as e:
            log.error('Failed starting: %s', traceback.format_exc())
            try:
                nr, msg = e.args
                if nr == 48:
                    log.info('Port (%s) needed for CouchPotato is already in use, try %s more time after few seconds', (config.get('port'), restart_tries))
                    time.sleep(1)
                    restart_tries -= 1

                    if restart_tries > 0:
                        continue
                    else:
                        return
            except:
                pass

            raise

        try_restart = False
Example No. 13
class srDatabase(object):
    _indexes = {}
    _migrate_list = {}

    def __init__(self, name=''):
        self.name = name
        self.old_db_path = ''

        self.db_path = os.path.join(sickrage.DATA_DIR, 'database', self.name)
        self.db = SuperThreadSafeDatabase(self.db_path)

        if self.db.exists():
            self.db.open()

    def initialize(self):
        # Remove database folder if both exists
        if self.db.exists() and os.path.isfile(self.old_db_path):
            self.db.destroy()

        if self.db.exists():
            # Backup before start and cleanup old backups
            backup_path = os.path.join(sickrage.DATA_DIR, 'db_backup',
                                       self.name)
            backup_count = 5
            existing_backups = []
            if not os.path.isdir(backup_path): os.makedirs(backup_path)

            for root, dirs, files in os.walk(backup_path):
                # Only consider files being a direct child of the backup_path
                if root == backup_path:
                    for backup_file in sorted(files):
                        ints = re.findall(r'\d+', backup_file)

                        # Delete non zip files
                        if len(ints) != 1:
                            try:
                                os.remove(os.path.join(root, backup_file))
                            except:
                                pass
                        else:
                            existing_backups.append(
                                (int(ints[0]), backup_file))
                else:
                    # Delete stray directories.
                    shutil.rmtree(root)

            # Remove all but the last 5
            for eb in existing_backups[:-backup_count]:
                os.remove(os.path.join(backup_path, eb[1]))

            # Create new backup
            new_backup = os.path.join(backup_path,
                                      '%s.tar.gz' % int(time.time()))
            with tarfile.open(new_backup, 'w:gz') as zipf:
                for root, dirs, files in os.walk(self.db_path):
                    for zfilename in files:
                        zipf.add(os.path.join(root, zfilename),
                                 arcname='database/%s' % os.path.join(
                                     root[len(self.db_path) + 1:], zfilename))
        else:
            self.db.create()

        # setup database indexes
        self.setupIndexes()

    def compact(self, try_repair=True, **kwargs):
        # Removing left over compact files
        for f in os.listdir(self.db.path):
            for x in ['_compact_buck', '_compact_stor']:
                if f[-len(x):] == x:
                    os.unlink(os.path.join(self.db.path, f))

        try:
            start = time.time()
            size = float(self.db.get_db_details().get('size', 0))
            sickrage.srCore.srLogger.debug(
                'Compacting {} database, current size: {}MB'.format(
                    self.name, round(size / 1048576, 2)))

            self.db.compact()
            new_size = float(self.db.get_db_details().get('size', 0))
            sickrage.srCore.srLogger.debug(
                'Done compacting {} database in {}s, new size: {}MB, saved: {}MB'
                .format(self.name, round(time.time() - start, 2),
                        round(new_size / 1048576, 2),
                        round((size - new_size) / 1048576, 2)))
        except Exception:
            if try_repair:
                sickrage.srCore.srLogger.error(
                    'Something wrong with indexes, trying repair')

                # Remove all indexes
                old_indexes = self._indexes.keys()
                for index_name in old_indexes:
                    try:
                        self.db.destroy_index(index_name)
                    except IndexNotFoundException:
                        pass
                    except:
                        sickrage.srCore.srLogger.error(
                            'Failed removing old index %s', index_name)

                # Add them again
                for index_name in self._indexes:
                    try:
                        self.db.add_index(self._indexes[index_name](
                            self.db.path, index_name))
                        self.db.reindex_index(index_name)
                    except IndexConflict:
                        pass
                    except:
                        sickrage.srCore.srLogger.error(
                            'Failed adding index %s', index_name)
                        raise

                self.compact(try_repair=False)
            else:
                sickrage.srCore.srLogger.error('Failed compact: {}'.format(
                    traceback.format_exc()))
        except:
            sickrage.srCore.srLogger.error('Failed compact: {}'.format(
                traceback.format_exc()))

    def setupIndexes(self):
        # setup database indexes
        for index_name in self._indexes:
            try:
                # Make sure store and bucket don't exist
                exists = []
                for x in ['buck', 'stor']:
                    full_path = os.path.join(self.db.path,
                                             '%s_%s' % (index_name, x))
                    if os.path.exists(full_path):
                        exists.append(full_path)

                if index_name not in self.db.indexes_names:
                    # Remove existing buckets if index isn't there
                    for x in exists:
                        os.unlink(x)

                    self.db.add_index(self._indexes[index_name](self.db.path,
                                                                index_name))
                    # self.db.reindex_index(self.db.indexes_names[index_name])
                else:
                    # Previous info
                    previous_version = self.db.indexes_names[
                        index_name]._version
                    current_version = self._indexes[index_name]._version

                    # Only edit index if versions are different
                    if previous_version < current_version:
                        self.db.destroy_index(
                            self.db.indexes_names[index_name])
                        self.db.add_index(self._indexes[index_name](
                            self.db.path, index_name))
                        self.db.reindex_index(index_name)
            except:
                sickrage.srCore.srLogger.error(
                    'Failed adding index {}'.format(index_name))

    def close(self):
        self.db.close()

    def check_integrity(self):
        for index_name in self._indexes:
            try:
                for x in self.db.all(index_name):
                    try:
                        self.db.get('id', x.get('_id'), with_doc=True)
                    except (ValueError, TypeError) as e:
                        self.db.delete(
                            self.db.get(index_name,
                                        x.get('key'),
                                        with_doc=True)['doc'])
            except Exception as e:
                if index_name in self.db.indexes_names:
                    self.db.destroy_index(self.db.indexes_names[index_name])

    def migrate(self):
        if os.path.isfile(self.old_db_path):
            sickrage.srCore.srLogger.info('=' * 30)
            sickrage.srCore.srLogger.info(
                'Migrating %s database, please wait...', self.name)
            migrate_start = time.time()

            import sqlite3
            conn = sqlite3.connect(self.old_db_path)

            migrate_data = {}
            rename_old = False

            try:
                c = conn.cursor()

                for ml in self._migrate_list:
                    migrate_data[ml] = {}
                    rows = self._migrate_list[ml]

                    try:
                        c.execute('SELECT {} FROM `{}`'.format(
                            '`' + '`,`'.join(rows) + '`', ml))
                    except:
                        # faulty source table: rename the old database and abort
                        rename_old = True
                        raise

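                    # Group rows by their first column; rows sharing a key
                    # are collected into a list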
                    for p in c.fetchall():
                        columns = {}
                        for row in self._migrate_list[ml]:
                            columns[row] = p[rows.index(row)]

                        if not migrate_data[ml].get(p[0]):
                            migrate_data[ml][p[0]] = columns
                        else:
                            if not isinstance(migrate_data[ml][p[0]], list):
                                migrate_data[ml][p[0]] = [
                                    migrate_data[ml][p[0]]
                                ]
                            migrate_data[ml][p[0]].append(columns)

                conn.close()

                sickrage.srCore.srLogger.info('Getting data took %s',
                                              (time.time() - migrate_start))

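                # Abort the import if the new database isn't open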
                if not self.db.opened:
                    return

                for t_name in migrate_data:
                    t_data = migrate_data.get(t_name, {})
                    sickrage.srCore.srLogger.info('Importing %s %s' %
                                                  (len(t_data), t_name))
                    for k, v in t_data.items():
                        if isinstance(v, list):
                            for d in v:
                                d.update({'_t': t_name})
                                self.db.insert(d)
                        else:
                            v.update({'_t': t_name})
                            self.db.insert(v)

                sickrage.srCore.srLogger.info('Total migration took %s',
                                              (time.time() - migrate_start))
                sickrage.srCore.srLogger.info('=' * 30)

                rename_old = True
            except OperationalError:
                sickrage.srCore.srLogger.error(
                    'Migrating from unsupported/corrupt %s database version',
                    self.name)
                rename_old = True
            except:
                sickrage.srCore.srLogger.error(
                    'Migration of %s database failed', self.name)

            # rename old database
            if rename_old:
                random = randomString()
                sickrage.srCore.srLogger.info(
                    'Renaming old database to %s.%s_old' %
                    (self.old_db_path, random))
                os.rename(self.old_db_path,
                          '{}.{}_old'.format(self.old_db_path, random))

                if os.path.isfile(self.old_db_path + '-wal'):
                    os.rename(self.old_db_path + '-wal',
                              '{}-wal.{}_old'.format(self.old_db_path, random))
                if os.path.isfile(self.old_db_path + '-shm'):
                    os.rename(self.old_db_path + '-shm',
                              '{}-shm.{}_old'.format(self.old_db_path, random))
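
A hypothetical usage sketch (not part of the original source) of the srDatabase class above, showing how a subclass wires _indexes and _migrate_list into setupIndexes() and migrate(); TVShowsIndex and the table/column names are placeholders:

class MainDB(srDatabase):
    # Placeholder CodernityDB index and legacy sqlite schema (hypothetical)
    _indexes = {'tv_shows': TVShowsIndex}
    _migrate_list = {'tv_shows': ['indexer_id', 'show_name', 'location']}

    def __init__(self):
        super(MainDB, self).__init__('main')
        self.old_db_path = os.path.join(sickrage.app.data_dir, 'sickrage.db')

db = MainDB()
db.initialize()    # assumed to create or open the CodernityDB store
db.setupIndexes()  # adds new indexes, upgrades versioned ones
db.migrate()       # one-time import from the legacy sqlite file
db.close()
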
Example #16
def runCouchPotato(options, base_path, args, data_dir=None, log_dir=None, Env=None, desktop=None):

    try:
        locale.setlocale(locale.LC_ALL, "")
        encoding = locale.getpreferredencoding()
    except (locale.Error, IOError):
        encoding = None

    # for OSes that are poorly configured I'll just force UTF-8
    if not encoding or encoding in ("ANSI_X3.4-1968", "US-ASCII", "ASCII"):
        encoding = "UTF-8"

    Env.set("encoding", encoding)

    # Do db stuff
    db_path = sp(os.path.join(data_dir, "database"))
    old_db_path = os.path.join(data_dir, "couchpotato.db")

    # Remove the database folder if both old and new databases exist
    if os.path.isdir(db_path) and os.path.isfile(old_db_path):
        db = SuperThreadSafeDatabase(db_path)
        db.open()
        db.destroy()

    # Check if database exists
    db = SuperThreadSafeDatabase(db_path)
    db_exists = db.exists()
    if db_exists:

        # Backup before start and cleanup old backups
        backup_path = sp(os.path.join(data_dir, "db_backup"))
        backup_count = 5
        existing_backups = []
        if not os.path.isdir(backup_path):
            os.makedirs(backup_path)

        for root, dirs, files in os.walk(backup_path):
            # Only consider files being a direct child of the backup_path
            if root == backup_path:
                for backup_file in sorted(files):
                    ints = re.findall(r"\d+", backup_file)

                    # Delete files that aren't timestamped archives
                    if len(ints) != 1:
                        try:
                            os.remove(os.path.join(root, backup_file))
                        except:
                            pass
                    else:
                        existing_backups.append((int(ints[0]), backup_file))
            else:
                # Delete stray directories.
                shutil.rmtree(root)

        # Remove all but the newest backup_count backups
        for eb in existing_backups[:-backup_count]:
            os.remove(os.path.join(backup_path, eb[1]))

        # Create new backup
        new_backup = sp(os.path.join(backup_path, "%s.tar.gz" % int(time.time())))
        zipf = tarfile.open(new_backup, "w:gz")
        for root, dirs, files in os.walk(db_path):
            for zfilename in files:
                zipf.add(
                    os.path.join(root, zfilename),
                    arcname="database/%s" % os.path.join(root[len(db_path) + 1 :], zfilename),
                )
        zipf.close()

        # Open last
        db.open()

    else:
        db.create()

    # Force creation of cachedir
    log_dir = sp(log_dir)
    cache_dir = sp(os.path.join(data_dir, "cache"))
    python_cache = sp(os.path.join(cache_dir, "python"))

    if not os.path.exists(cache_dir):
        os.mkdir(cache_dir)
    if not os.path.exists(python_cache):
        os.mkdir(python_cache)

    session = requests.Session()
    session.max_redirects = 5

    # Register environment settings
    Env.set("app_dir", sp(base_path))
    Env.set("data_dir", sp(data_dir))
    Env.set("log_path", sp(os.path.join(log_dir, "CouchPotato.log")))
    Env.set("db", db)
    Env.set("http_opener", session)
    Env.set("cache_dir", cache_dir)
    Env.set("cache", FileSystemCache(python_cache))
    Env.set("console_log", options.console_log)
    Env.set("quiet", options.quiet)
    Env.set("desktop", desktop)
    Env.set("daemonized", options.daemon)
    Env.set("args", args)
    Env.set("options", options)

    # Determine debug
    debug = options.debug or Env.setting("debug", default=False, type="bool")
    Env.set("debug", debug)

    # Development
    development = Env.setting("development", default=False, type="bool")
    Env.set("dev", development)

    # Disable logging for some modules
    for logger_name in ["enzyme", "guessit", "subliminal", "apscheduler", "tornado", "requests"]:
        logging.getLogger(logger_name).setLevel(logging.ERROR)

    for logger_name in ["gntp"]:
        logging.getLogger(logger_name).setLevel(logging.WARNING)

    # Disable SSL warning
    disable_warnings()

    # Use reloader
    reloader = debug is True and development and not Env.get("desktop") and not options.daemon

    # Logger
    logger = logging.getLogger()
    formatter = logging.Formatter("%(asctime)s %(levelname)s %(message)s", "%m-%d %H:%M:%S")
    level = logging.DEBUG if debug else logging.INFO
    logger.setLevel(level)
    logging.addLevelName(19, "INFO")

    # To screen
    if (debug or options.console_log) and not options.quiet and not options.daemon:
        hdlr = logging.StreamHandler(sys.stderr)
        hdlr.setFormatter(formatter)
        logger.addHandler(hdlr)

    # To file
    hdlr2 = handlers.RotatingFileHandler(Env.get("log_path"), "a", 500000, 10, encoding=Env.get("encoding"))
    hdlr2.setFormatter(formatter)
    logger.addHandler(hdlr2)

    # Start logging & enable colors
    # noinspection PyUnresolvedReferences
    import color_logs
    from couchpotato.core.logger import CPLog

    log = CPLog(__name__)
    log.debug("Started with options %s", options)

    # Check soft-chroot dir exists:
    try:
        # Load Soft-Chroot
        soft_chroot = Env.get("softchroot")
        soft_chroot_dir = Env.setting("soft_chroot", section="core", default=None, type="unicode")
        soft_chroot.initialize(soft_chroot_dir)
    except SoftChrootInitError as exc:
        log.error(exc)
        return
    except:
        log.error("Unable to check whether SOFT-CHROOT is defined")
        return

    # Check available space
    try:
        total_space, available_space = getFreeSpace(data_dir)
        if available_space < 100:
            log.error(
                "Shutting down as CP needs some space to work. You'll get corrupted data otherwise. Only %sMB left",
                available_space,
            )
            return
    except:
        log.error("Failed getting diskspace: %s", traceback.format_exc())

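    # Route Python warnings through the CP log instead of stderr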
    def customwarn(message, category, filename, lineno, file=None, line=None):
        log.warning("%s %s %s line:%s" % (category, message, filename, lineno))

    warnings.showwarning = customwarn

    # Create app
    from couchpotato import WebHandler

    web_base = ("/" + Env.setting("url_base").lstrip("/") + "/") if Env.setting("url_base") else "/"
    Env.set("web_base", web_base)

    api_key = Env.setting("api_key")
    if not api_key:
        api_key = uuid4().hex
        Env.setting("api_key", value=api_key)

    api_base = r"%sapi/%s/" % (web_base, api_key)
    Env.set("api_base", api_base)

    # Basic config
    host = Env.setting("host", default="0.0.0.0")
    host6 = Env.setting("host6", default="::")

    config = {
        "use_reloader": reloader,
        "port": tryInt(Env.setting("port", default=5050)),
        "host": host if host and len(host) > 0 else "0.0.0.0",
        "host6": host6 if host6 and len(host6) > 0 else "::",
        "ssl_cert": Env.setting("ssl_cert", default=None),
        "ssl_key": Env.setting("ssl_key", default=None),
    }

    # Load the app
    application = Application(
        [],
        log_function=lambda x: None,
        debug=config["use_reloader"],
        gzip=True,
        cookie_secret=api_key,
        login_url="%slogin/" % web_base,
    )
    Env.set("app", application)

    # Request handlers
    application.add_handlers(
        ".*$",
        [
            (r"%snonblock/(.*)(/?)" % api_base, NonBlockHandler),
            # API handlers
            (r"%s(.*)(/?)" % api_base, ApiHandler),  # Main API handler
            (r"%sgetkey(/?)" % web_base, KeyHandler),  # Get API key
            (r"%s" % api_base, RedirectHandler, {"url": web_base + "docs/"}),  # API docs
            # Login handlers
            (r"%slogin(/?)" % web_base, LoginHandler),
            (r"%slogout(/?)" % web_base, LogoutHandler),
            # Catch all webhandlers
            (r"%s(.*)(/?)" % web_base, WebHandler),
            (r"(.*)", WebHandler),
        ],
    )

    # Static paths
    static_path = "%sstatic/" % web_base
    for dir_name in ["fonts", "images", "scripts", "style"]:
        application.add_handlers(
            ".*$",
            [
                (
                    "%s%s/(.*)" % (static_path, dir_name),
                    StaticFileHandler,
                    {"path": sp(os.path.join(base_path, "couchpotato", "static", dir_name))},
                )
            ],
        )
    Env.set("static_path", static_path)

    # Load configs & plugins
    loader = Env.get("loader")
    loader.preload(root=sp(base_path))
    loader.run()

    # Fill database with needed stuff
    fireEvent("database.setup")
    if not db_exists:
        fireEvent("app.initialize", in_order=True)
    fireEvent("app.migrate")

    # Go go go!
    from tornado.ioloop import IOLoop
    from tornado.autoreload import add_reload_hook

    loop = IOLoop.current()

    # Reload hook
    def reload_hook():
        fireEvent("app.shutdown")

    add_reload_hook(reload_hook)

    # Some logging and fire load event
    try:
        log.info("Starting server on port %(port)s", config)
    except:
        pass
    fireEventAsync("app.load")

    ssl_options = None
    if config["ssl_cert"] and config["ssl_key"]:
        ssl_options = {"certfile": config["ssl_cert"], "keyfile": config["ssl_key"]}

    server = HTTPServer(application, no_keep_alive=True, ssl_options=ssl_options)

    try_restart = True
    restart_tries = 5

    while try_restart:
        try:
            if config["host"].startswith("unix:"):
                server.add_socket(bind_unix_socket(config["host"][5:]))
            else:
                server.listen(config["port"], config["host"])

                if Env.setting("ipv6", default=False):
                    try:
                        server.listen(config["port"], config["host6"])
                    except:
                        log.info2("Tried to bind to IPV6 but failed")

            loop.start()
            server.close_all_connections()
            server.stop()
            loop.close(all_fds=True)
        except Exception as e:
            log.error("Failed starting: %s", traceback.format_exc())
            try:
                nr, msg = e
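                # errno 48 is EADDRINUSE on BSD/macOS (98 on Linux)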
                if nr == 48:
                    log.info(
                        "Port (%s) needed for CouchPotato is already in use, try %s more time after few seconds",
                        (config.get("port"), restart_tries),
                    )
                    time.sleep(1)
                    restart_tries -= 1

                    if restart_tries > 0:
                        continue
                    else:
                        return
            except ValueError:
                return
            except:
                pass

            raise

        try_restart = False
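
The backup block above archives the database folder into a timestamped tar.gz and prunes older archives. A minimal standalone sketch of the same rotation, assuming only the standard library; db_path and backup_path are placeholder locations:

import os
import re
import tarfile
import time

def backup_database(db_path, backup_path, keep=5):
    # Create the backup folder on first use
    if not os.path.isdir(backup_path):
        os.makedirs(backup_path)

    # Collect existing timestamped archives, oldest first
    backups = sorted(
        (int(re.findall(r"\d+", name)[0]), name)
        for name in os.listdir(backup_path)
        if re.match(r"^\d+\.tar\.gz$", name)
    )

    # Keep only the newest `keep` archives before adding a new one
    for _, name in backups[:-keep]:
        os.remove(os.path.join(backup_path, name))

    # Archive the whole database folder under a timestamped name
    archive = os.path.join(backup_path, "%s.tar.gz" % int(time.time()))
    with tarfile.open(archive, "w:gz") as tar:
        tar.add(db_path, arcname="database")
    return archive
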
Example #17
class CodernityDbStorage(ProviderStorage, Plugin):
    __key__ = 'codernitydb'

    def __init__(self, path=None):
        super(CodernityDbStorage, self).__init__()

        self.path = path

        self.database = SuperThreadSafeDatabase(path)

        if os.path.exists(path):
            self.database.open()

    #
    # Provider methods
    #

    def create(self, source, target):
        if os.path.exists(self.path):
            return True

        # Create database
        self.database.create()

        # Add indices
        self.database.add_index(CollectionKeyIndex(self.database.path,      'collection_key'))
        self.database.add_index(MetadataKeyIndex(self.database.path,        'metadata_key'))
        self.database.add_index(MetadataCollectionIndex(self.database.path, 'metadata_collection'))
        self.database.add_index(ItemKeyIndex(self.database.path,            'item_key'))

        return True

    def open_database(self, source, target, database=None):
        return ModelRegistry['Database'].load(
            DatabaseCodernityDbStorage.open(self, source, target, database),
            source, target
        )

    #
    # Collection methods
    #

    def has_collection(self, source, target):
        try:
            self.database.get('collection_key', (source, target))
            return True
        except RecordNotFound:
            pass

        return False

    def get_collection_version(self, source, target):
        try:
            item = self.database.get('collection_key', (source, target), with_doc=True)

            if not item or 'doc' not in item:
                return None

            return Version(item['doc'].get('version'))
        except RecordNotFound:
            pass

        return None

    @Elapsed.track
    def update_collection(self, source, target, version):
        # Retrieve current item
        try:
            current = self.database.get('collection_key', (source, target), with_doc=True)

            if 'doc' in current:
                current = current['doc']
            else:
                current = None
        except RecordNotFound:
            current = None

        # Build collection metadata
        item = {
            '_': {
                't': 'collection',

                'c': {
                    's': source,
                    't': target
                }
            },

            # Collection attributes
            'version': str(version)
        }

        # Add current item identifier
        if current and '_id' in current and '_rev' in current:
            item['_id'] = current['_id']
            item['_rev'] = current['_rev']

        # Update database
        if current:
            # Update existing item
            try:
                self.database.update(item)
            except Exception as ex:
                log.warn('Unable to update collection: %s', ex, exc_info=True)
                return False

            log.debug('[%s -> %s] Updated collection', source, target)
            return True

        # Insert new item
        try:
            self.database.insert(item)
        except Exception as ex:
            log.warn('Unable to insert collection: %s', ex, exc_info=True)
            return False