Example #1
def event_notify(self, event, event_info):
    if event == db.DB_EVENT_REP_MASTER:
        self.master = self.local_site
        # notify sub-processes
        services.notify(('NEW_MASTER', self.local_site))
        self.broadcast(MgtMessage('REP_NEW_MASTER', self.local_site))
        logger.info('REP: Node elected as new MASTER')
    elif event == db.DB_EVENT_REP_STARTUPDONE:
        self.client_startup_done = True
        logger.info('REP: Replication client startup is finished')
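
This handler has the shape of a pybsddb event-notification callback. A minimal wiring sketch, assuming the replication manager registers a bound method on the environment (env and manager are illustrative names, not from the excerpt):

# hypothetical registration: pybsddb invokes the callback with
# (event, event_info); the bound method supplies self
env = db.DBEnv()
env.set_event_notify(manager.event_notify)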
Example #2
def _import_items():
    for dbfile in dbfiles:
        logger.info('Importing object ' + os.path.basename(dbfile))
        fn = '%s/%s' % (self.tmp_folder, dbfile)
        self.package_file.extract(dbfile, self.tmp_folder)
        # context manager replaces the manual try/finally close
        with open(fn, 'rb') as objfile:
            self._import_item(objfile)
Example #3
    def _import_item(self, fileobj):
        stream = fileobj.read()
        item = persist.loads(stream)

        # TODO: remove next block
        # kept for backwards compatibility
        if hasattr(item, '_parentid'):
            pid = item._parentid
            delattr(item, '_parentid')
            item._pid = pid
        if hasattr(item, '_containerid'):
            pid = item._containerid
            delattr(item, '_containerid')
            item._pid = pid

        # check if the item already exists
        old_item = db._db.get_item(item.id)
        if old_item is None:
            # write external attributes
            for prop in [getattr(item, x) for x in item.__props__
                         if hasattr(item, x)]:
                if isinstance(prop, datatypes.ExternalAttribute):
                    prop._isDirty = True
                    prop._eventHandler.on_create(item, prop)
            # update parent's modification date
            p = db._db.get_item(item._pid)
            if p is not None:
                p.modified = time.time()
                db._db.put_item(p)
            db._db.put_item(item)
        else:
            logger.info('Item "%s" already exists. Upgrading object...' %
                        item.displayName.value)
            item.displayName.value = old_item.displayName.value
            item.description.value = old_item.description.value
            item.inheritRoles = old_item.inheritRoles
            item.modifiedBy = old_item.modifiedBy
            item.modified = old_item.modified
            item._created = old_item._created
            item.security = old_item.security
            db._db.put_item(item)
Example #4
    def __init__(self, env, config):
        self.env = env
        self.client_startup_done = False
        self.config = config

        address = misc.get_address_from_string(self.config['address'])
        if 'management' in services:
            mgt_address = services['management'].addr
        else:
            mgt_address = None
        if 'main' in services:
            req_address = services['main'].addr
        else:
            req_address = None
        self.local_site = Site(address, mgt_address, req_address)

        if self.config['priority'] == 0:
            self.role = db.DB_REP_CLIENT
        else:
            role = self.config.get('role', 'CLIENT')
            self.role = getattr(db, 'DB_REP_%s' % role)

        if self.role == db.DB_REP_ELECTION:
            logger.info(
                'REP: Starting replication manager and calling for election')
        elif self.role == db.DB_REP_CLIENT:
            logger.info('REP: Starting replication manager as a client')
        elif self.role == db.DB_REP_MASTER:
            logger.info('REP: Starting replication manager as MASTER')

        if self.role == db.DB_REP_MASTER:
            self.master = self.local_site
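
The constructor above implies a configuration shaped roughly like the dictionary below (the keys are taken from the reads in the code; the values are illustrative):

# hypothetical rep_config: a zero priority forces DB_REP_CLIENT,
# otherwise 'role' is resolved to a db.DB_REP_* constant
rep_config = {
    'address': '127.0.0.1:9091',  # parsed by misc.get_address_from_string
    'priority': 1,
    'role': 'ELECTION',           # becomes db.DB_REP_ELECTION
}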
Example #5
class DB(object):
    "Berkeley DB database interface"
    # data dir
    dir = os.path.abspath(settings['store']['bdb_data_dir'])
    if dir[-1] != '/':
        dir += '/'
    # log_dir
    log_dir = os.path.abspath(settings['store'].get('bdb_log_dir', dir))
    if log_dir[-1] != '/':
        log_dir += '/'
    # environment files directory
    env_dir = os.path.abspath(settings['store'].get(
        'env_dir', os.path.abspath(settings['global']['temp_folder'])))
    if env_dir[-1] != '/':
        env_dir += '/'
    # cache size
    cache_size = settings['store'].get('cache_size', None)
    # maximum concurrent transactions
    # due to snapshot isolation this should be kept high enough
    txn_max = settings['store'].get('max_tx', 1000)
    # transaction timeout
    txn_timeout = settings['store'].get('tx_timeout', None)
    # shared memory key
    shm_key = settings['store'].get('shm_key', None)
    # maintenance (deadlock detector) thread
    _maintenance_thread = None
    # checkpoint thread
    _checkpoint_thread = None
    # trickle thread
    _trickle_thread = None

    # log berkeleyDB version
    logger.info('BerkeleyDB version is %s' %
                '.'.join(str(x) for x in db.version()))

    def __init__(self, **kwargs):
        # create db environment
        additional_flags = kwargs.get('flags', 0)
        recovery_mode = kwargs.get('recover', 0)
        if recovery_mode == 2:
            additional_flags |= db.DB_RECOVER_FATAL
        elif recovery_mode == 1:
            additional_flags |= db.DB_RECOVER
            if hasattr(db, 'DB_REGISTER'):
                additional_flags |= db.DB_REGISTER

        self._env = db.DBEnv()
        # ability to override settings' dir for testing purposes
        data_dir = kwargs.get('dir', self.dir)
        self._env.set_data_dir(data_dir)
        # ability to override settings' log_dir for testing purposes
        log_dir = kwargs.get('log_dir', kwargs.get('dir', self.log_dir))
        self._env.set_lg_dir(log_dir)
        self._env.set_tx_max(self.txn_max)

        if self.txn_timeout is not None:
            if self.txn_timeout > 0:
                self._env.set_timeout(self.txn_timeout, db.DB_SET_TXN_TIMEOUT)
            else:
                self._env.set_flags(db.DB_TXN_NOWAIT, 1)

        if self.cache_size is not None:
            self._env.set_cachesize(*self.cache_size)

        if os.name != 'nt' and self.shm_key:
            self._env.set_shm_key(self.shm_key)
            additional_flags |= db.DB_SYSTEM_MEM

        # replication settings
        rep_config = settings['store'].get('rep_config', None)
        init_rep = kwargs.get('init_rep', False)

        if rep_config:
            # in replicated environments use non-durable transactions
            self._env.set_flags(db.DB_TXN_NOSYNC, 1)
            additional_flags |= db.DB_INIT_REP

        self._env.open(
            self.env_dir,
            db.DB_THREAD | db.DB_INIT_MPOOL | db.DB_INIT_LOCK | db.DB_INIT_LOG
            | db.DB_INIT_TXN | db.DB_CREATE | additional_flags)

        db_flags = db.DB_THREAD | db.DB_AUTO_COMMIT | db.DB_CREATE | \
                   db.DB_MULTIVERSION
        db_mode = 0o660

        if rep_config:
            from porcupine.db.bsddb.replication import ReplicationService

            # initialize replication service
            self.replication_service = \
                ReplicationService(self._env, rep_config)

            if init_rep:
                # check multiprocessing
                is_multiprocess = services['main'].is_multiprocess or \
                                  services['management'].is_multiprocess

                if is_multiprocess and int(rep_config['priority']) > 0 \
                        and db.version() < (4, 8):
                    self._env.close()
                    self.__remove_env()
                    raise exceptions.ConfigurationError(
                        'Multiprocessing master candidates ' +
                        'require BerkeleyDB 4.8 or higher')

                # start replication service
                self.replication_service.start()

                # wait for client start-up
                timeout = time.time() + 20
                while time.time() < timeout and \
                        not self.replication_service.is_master() and \
                        not self.replication_service.client_startup_done:
                    time.sleep(0.02)

                timeout = time.time() + 20
                while time.time() < timeout and \
                        not (os.path.exists(
                             os.path.join(self.dir, 'porcupine.db'))):
                    time.sleep(0.02)
        else:
            self.replication_service = None

        # open items db
        while True:
            self._itemdb = db.DB(self._env)
            self._itemdb.set_pagesize(2048)
            try:
                self._itemdb.open('porcupine.db',
                                  'items',
                                  dbtype=db.DB_BTREE,
                                  mode=db_mode,
                                  flags=db_flags)
            except db.DBLockDeadlockError:
                self._itemdb.close()
                continue
            break

        # open documents db
        while True:
            self._docdb = db.DB(self._env)
            try:
                self._docdb.open('porcupine.db',
                                 'docs',
                                 dbtype=db.DB_HASH,
                                 mode=db_mode,
                                 flags=db_flags)
            except db.DBLockDeadlockError:
                self._docdb.close()
                continue
            break

        # open indices
        self._indices = {}
        for name, unique, immutable in settings['store']['indices']:
            self._indices[name] = DbIndex(self._env, self._itemdb, name,
                                          unique, immutable, db_flags)

        self._running = True

        maintain = kwargs.get('maintain', False)
        if maintain and self._maintenance_thread is None:
            # start deadlock detector
            self._maintenance_thread = Thread(target=self.__maintain,
                                              name='DB maintenance thread')
            self._maintenance_thread.start()
            # start checkpoint thread
            self._checkpoint_thread = Thread(target=self.__checkpoint,
                                             name='DB checkpoint thread')
            self._checkpoint_thread.start()
            #if hasattr(self._env, 'memp_trickle'):
            #    # start memp_trickle thread
            #    self._trickle_thread = Thread(target=self.__trickle,
            #                                  name='DB memp_trickle thread')
            #    self._trickle_thread.start()

    def is_open(self):
        return self._running

    # item operations
    def get_item(self, oid):
        if not isinstance(oid, bytes):
            oid = oid.encode('utf-8')
        try:
            return self._indices['_id'].db.get(oid,
                                               txn=context._trans
                                               and context._trans.txn)
        except UnicodeEncodeError:
            return None
        except (db.DBLockDeadlockError, db.DBLockNotGrantedError):
            if context._trans is not None:
                context._trans.abort()
            raise exceptions.DBRetryTransaction

    def put_item(self, item):
        try:
            self._itemdb.put(
                pack_value(item._pid) + b'_' + pack_value(item._id),
                persist.dumps(item), context._trans.txn)
        except (db.DBLockDeadlockError, db.DBLockNotGrantedError):
            context._trans.abort()
            raise exceptions.DBRetryTransaction
        except db.DBError as e:
            if e.args[0] == _err_unsupported_index_type:
                raise db.DBError('Unsupported indexed data type')
            else:
                raise

    def delete_item(self, item):
        try:
            self._itemdb.delete(
                pack_value(item._pid) + b'_' + pack_value(item._id),
                context._trans.txn)
        except (db.DBLockDeadlockError, db.DBLockNotGrantedError):
            context._trans.abort()
            raise exceptions.DBRetryTransaction

    # containers
    def get_children(self, container_id):
        cursor = Cursor(self._itemdb, '_pid')
        cursor.set_scope(container_id)
        cursor.set_range(None, None)
        return cursor

    def get_child_by_name(self, container_id, name):
        try:
            return self._indices['displayName'].db.get(
                pack_value(container_id) + b'_' + pack_value(name),
                txn=context._trans and context._trans.txn)
        except (db.DBLockDeadlockError, db.DBLockNotGrantedError):
            if context._trans is not None:
                context._trans.abort()
            raise exceptions.DBRetryTransaction

    # external attributes
    def get_external(self, id):
        try:
            return self._docdb.get(id.encode('ascii'),
                                   txn=context._trans and context._trans.txn)
        except (db.DBLockDeadlockError, db.DBLockNotGrantedError):
            if context._trans is not None:
                context._trans.abort()
            raise exceptions.DBRetryTransaction

    def put_external(self, id, stream):
        try:
            self._docdb.put(id.encode('ascii'), stream, context._trans.txn)
        except (db.DBLockDeadlockError, db.DBLockNotGrantedError):
            context._trans.abort()
            raise exceptions.DBRetryTransaction

    def delete_external(self, id):
        try:
            self._docdb.delete(id.encode('ascii'), context._trans.txn)
        except db.DBNotFoundError:
            # virtual external due to elastic schema
            pass
        except (db.DBLockDeadlockError, db.DBLockNotGrantedError):
            context._trans.abort()
            raise exceptions.DBRetryTransaction

    # indices
    def get_cursor_list(self, conditions):
        cur_list = []
        for index, value in conditions:
            cursor = Cursor(self._indices[index].db, self._indices[index].name)
            if isinstance(value, (list, tuple)):
                is_reversed = (len(value) == 3 and value[2])
                cursor.set_range(value[0], value[1])
                if is_reversed:
                    cursor.reverse()
            else:
                cursor.set(value)
            cur_list.append(cursor)
        return cur_list

    def query(self, conditions):
        cur_list = self.get_cursor_list(conditions)
        if len(cur_list) == 1:
            return cur_list[0]
        else:
            return Join(self._itemdb, cur_list)

    def test_conditions(self, scope, conditions):
        cur_list = self.get_cursor_list(conditions)
        if len(cur_list) == 1:
            cursor = cur_list[0]
        else:
            cursor = Join(self._itemdb, cur_list)
        cursor.set_scope(scope)
        iterator = iter(cursor)
        try:
            result = bool(next(iterator))
        except StopIteration:
            result = False
        cursor.close()
        return result

    # transactions
    def get_transaction(self, **kwargs):
        nosync = kwargs.get('nosync', False)
        snapshot = kwargs.get('snapshot', False)
        return Transaction(self._env, nosync, snapshot)

    def __remove_env(self):
        files = glob.glob(self.env_dir + '__db.*')
        for file in files:
            try:
                os.remove(file)
            except OSError:
                pass

    # administrative
    def __remove_files(self):
        # environment files
        self.__remove_env()
        # log files
        files = glob.glob(self.log_dir + 'log.*')
        for file in files:
            os.remove(file)
        # database file
        os.remove(self.dir + 'porcupine.db')
        # index file
        os.remove(self.dir + 'porcupine.idx')

    def truncate(self):
        # older versions of bsddb do not support truncate
        if hasattr(self._itemdb, 'truncate'):
            self._itemdb.truncate()
            self._docdb.truncate()
        else:
            # close database
            self.close()
            # remove old database files
            self.__remove_files()
            # open db
            self.__init__()

    def backup(self, output_file):
        # force checkpoint
        self._env.txn_checkpoint(0, 0, db.DB_FORCE)
        logs = self._env.log_archive(db.DB_ARCH_LOG)
        backfiles = [self.dir + 'porcupine.db', self.dir + 'porcupine.idx'] + \
                    [self.log_dir + log.decode() for log in logs]
        # create a compact backup
        backup = BackupFile(output_file)
        backup.add_files(backfiles)

    def restore(self, bset):
        self.__remove_files()
        backup = BackupFile(bset)
        backup.extract(self.dir, self.log_dir)

    def shrink(self):
        logs = self._env.log_archive()
        for log in logs:
            os.remove(self.log_dir + log)
        return len(logs)

    def __maintain(self):
        "deadlock detection thread"
        while self._running:
            time.sleep(0.01)
            # deadlock detection
            try:
                aborted = self._env.lock_detect(db.DB_LOCK_YOUNGEST)
                if aborted:
                    logger.critical(
                        "Deadlock: Aborted %d deadlocked transaction(s)" %
                        aborted)
            except db.DBError:
                pass

    def __trickle(self):
        "memp_trickle thread"
        while self._running:
            self._env.memp_trickle(95)
            time.sleep(120)

    def __checkpoint(self):
        "checkpoint thread"
        while self._running:
            if self.replication_service is None \
                    or self.replication_service.is_master():
                # checkpoint every 512KB written
                self._env.txn_checkpoint(512, 0)
            time.sleep(16)

            #stats = self._env.txn_stat()
            #print('txns: %d' % stats['nactive'])
            #print('max txns: %d' % stats['maxnactive'])
            #print()
            #stats = self._env.lock_stat()
            #print('Lockers: %d' % stats['nlockers'])
            #print('Max Lockers: %d' % stats['maxnlockers'])
            #print('Lockers wait: %d' % stats['lockers_wait'])
            #print()
            #print('Locks: %d' % stats['nlocks'])
            #print('Max Locks: %d' % stats['maxnlocks'])
            #print('Locks wait: %d' % stats['lock_wait'])
            #print('Locks no-wait: %d' % stats['lock_nowait'])
            #print()
            #print('Lock objects: %d' % stats['nobjects'])
            #print('Max objects: %d' % stats['maxnobjects'])
            #print('Objects wait: %d' % stats['objs_wait'])
            #print()
            #print('Requested: %d' % stats['nrequests'])
            #print('Released: %d' % stats['nreleases'])
            #print('-' * 80)

    def close(self, **kwargs):
        if self._running:
            self._running = False

            # join threads
            if self._maintenance_thread is not None:
                self._maintenance_thread.join()
            if self._checkpoint_thread is not None:
                self._checkpoint_thread.join()
            if self._trickle_thread is not None:
                self._trickle_thread.join()

            self._itemdb.close()
            self._docdb.close()
            # close indexes
            for index in self._indices.values():
                index.close()
            self._env.close()
            # clean-up environment files
            if (self._maintenance_thread is not None
                    or kwargs.get('clear_env', False)):
                self.__remove_env()
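
A minimal bootstrap sketch under the assumptions the class encodes (a populated settings['store'] section, normal recovery, maintenance threads enabled); the item id is illustrative:

# hypothetical caller: open with recovery plus the deadlock-detector
# and checkpoint threads, then shut down and clear the environment
database = DB(recover=1, maintain=True)
try:
    raw = database.get_item('rootfolder')  # packed bytes or None
finally:
    database.close(clear_env=True)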
Example #6
    def create(self):
        # files
        files = self.config_file.options('files')
        for fl in files:
            fname = self.config_file.get('files', fl)
            logger.info('Adding file ' + fname)
            self.package_files.append((
                self.package_file.gettarinfo(fname, fname), fname))

        # directories
        dirs = self.config_file.options('dirs')
        for dir in dirs:
            dirname = self.config_file.get('dirs', dir)
            logger.info('Adding directory ' + dirname)
            self._addtree(dirname)

        # published directories
        if self.config_file.has_section('pubdir'):
            pubdirs = self.config_file.options('pubdir')
            dirsConfig = configfiles.PubDirManager()

            dir_nodes = []
            for dir in pubdirs:
                dirname = self.config_file.get('pubdir', dir)
                logger.info('Adding published directory "%s"' % dirname)
                dir_node = dirsConfig.getDirNode(dirname)
                if dir_node:
                    dir_nodes.append(dir_node)
                    dir_location = dir_node.getAttribute('path')
                    self._addtree(dir_location)
                else:
                    logger.warning('Published directory "%s" does not exist'
                                   % dirname)

            if dir_nodes:
                with open(self.tmp_folder + '/_pubdir.xml', 'w',
                          encoding='utf-8') as dirsFile:
                    # toxml() returns str; the file object handles encoding
                    dirsFile.write(
                        '<?xml version="1.0" encoding="utf-8"?><dirs>')
                    for dir_node in dir_nodes:
                        dirsFile.write(dir_node.toxml())
                    dirsFile.write('</dirs>')
                self.package_files.append((
                    self.package_file.gettarinfo(
                        dirsFile.name, os.path.basename(dirsFile.name)),
                    dirsFile.name))
            dirsConfig.close(False)

        # database items
        items = self.config_file.options('items')
        itemids = [self.config_file.get('items', x) for x in items]
        for itemid in itemids:
            item = db._db.get_item(itemid)
            self._export_item(item)

        # scripts
        if self.config_file.has_option('scripts', 'preinstall'):
            preinstall = self.config_file.get('scripts', 'preinstall')
            logger.info('Adding pre-install script "%s"' % preinstall)
            self.package_files.append((
                self.package_file.gettarinfo(preinstall, '_pre.py'),
                preinstall))

        if self.config_file.has_option('scripts', 'postinstall'):
            postinstall = self.config_file.get('scripts', 'postinstall')
            logger.info('Adding post-install script "%s"' % postinstall)
            self.package_files.append((
                self.package_file.gettarinfo(postinstall, '_post.py'),
                postinstall))

        if self.config_file.has_option('scripts', 'uninstall'):
            uninstall = self.config_file.get('scripts', 'uninstall')
            logger.info('Adding uninstall script "%s"' % uninstall)
            self.package_files.append((
                self.package_file.gettarinfo(uninstall, '_uninstall.py'),
                uninstall))

        # definition file
        self.package_files.append((
                self.package_file.gettarinfo(self.ini_file, '_pkg.ini'),
                self.ini_file))

        # compact files
        logger.info('Compacting...')
        for tarinfo, fname in self.package_files:
            if tarinfo.isfile():
                with open(fname, 'rb') as fileobj:
                    self.package_file.addfile(tarinfo, fileobj)
                # remove temporary
                if fname[:len(self.tmp_folder)] == self.tmp_folder:
                    os.remove(fname)
            else:
                if isinstance(fname, str):  # encode unicode paths
                    fname = fname.encode('utf-8')
                self.package_file.add(fname)
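
create() reads its whole manifest from self.config_file; a sketch of producing a matching definition file with configparser (the section and option reads above fix the layout, the paths are made up):

# hypothetical _pkg.ini for the packager
from configparser import ConfigParser

cfg = ConfigParser()
cfg['files'] = {'f1': 'conf/server.conf'}
cfg['dirs'] = {'d1': 'org/myapp'}
cfg['pubdir'] = {'p1': 'myapp'}
cfg['items'] = {'i1': 'rootfolder'}
cfg['scripts'] = {'preinstall': 'scripts/pre.py',
                  'postinstall': 'scripts/post.py',
                  'uninstall': 'scripts/remove.py'}
with open('_pkg.ini', 'w') as f:
    cfg.write(f)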
Example #7
def _remove_items():
    for itemid in itemids:
        item = db._db.get_item(itemid)
        if item is not None:
            logger.info('Removing object %s' % itemid)
            item._delete()
Example #8
    def uninstall(self):
        logger.info('Uninstalling [%s-%s] package...' %
                    (self.name, self.version))

        # database items
        items = self.config_file.options('items')
        itemids = [self.config_file.get('items', x) for x in items]

        if itemids:

            @db.transactional(auto_commit=True)
            def _remove_items():
                for itemid in itemids:
                    item = db._db.get_item(itemid)
                    if item is not None:
                        logger.info('Removing object %s' % itemid)
                        item._delete()

            _remove_items()

        # uninstall script
        contents = self.package_file.getnames()
        if '_uninstall.py' in contents:
            logger.info('Running uninstallation script...')
            self.package_file.extract('_uninstall.py', self.tmp_folder)
            self._execute_script(self.tmp_folder + '/_uninstall.py',
                                 'Uninstall script')
            os.remove(self.tmp_folder + '/_uninstall.py')

        # files
        files = self.config_file.options('files')
        for fl in files:
            fname = self.config_file.get('files', fl)
            logger.info('Removing file ' + fname)
            if os.path.exists(fname):
                os.remove(fname)
            # check if it is a python file
            if fname[-3:] == '.py':
                [os.remove(fname + x)
                 for x in ('c', 'o')
                 if os.path.exists(fname + x)]

        # directories
        dirs = self.config_file.options('dirs')
        for dir in dirs:
            dir_path = self.config_file.get('dirs', dir)
            if os.path.exists(dir_path):
                logger.info('Removing directory ' + dir_path)
                shutil.rmtree(dir_path, True)

        # published dirs
        if '_pubdir.xml' in contents:
            logger.info('Uninstalling published directories...')
            dirsfile = self.package_file.extractfile('_pubdir.xml')
            _dom = minidom.parse(dirsfile)
            dirsConfig = configfiles.PubDirManager()
            dir_nodes = _dom.getElementsByTagName('dir')
            for dir_node in dir_nodes:
                dir_name = dir_node.getAttribute('name')
                logger.info('Uninstalling published directory "%s"' % dir_name)
                old_node = dirsConfig.getDirNode(dir_name)
                if old_node:
                    dirsConfig.removeDirNode(old_node)
                else:
                    logger.warning('Published directory "%s" does not exist'
                                   % dir_name)
                # update published directories
                if dir_name in pubdirs.dirs:
                    del pubdirs.dirs[dir_name]
                    # remove published directory in
                    # multi-processing environments
                    services.notify(('REMOVE_PUBDIR', dir_name))

                dir_path = dir_node.getAttribute('path')
                if os.path.exists(dir_path):
                    shutil.rmtree(dir_path, True)

            _dom.unlink()
            dirsConfig.close(True)
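
The _pubdir.xml consumed here is the file create() writes in example #6; based on the attributes read above, a round-trip sketch (the name and path values are made up):

# hypothetical payload: a <dirs> wrapper around <dir> nodes
# carrying the 'name' and 'path' attributes used above
from xml.dom import minidom

sample = ('<?xml version="1.0" encoding="utf-8"?>'
          '<dirs><dir name="myapp" path="org/myapp/pub"/></dirs>')
dom = minidom.parseString(sample)
for node in dom.getElementsByTagName('dir'):
    print(node.getAttribute('name'), node.getAttribute('path'))
dom.unlink()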
Example #9
    def install(self):
        logger.info('Installing [%s-%s] package...' %
                    (self.name, self.version))
        contents = self.package_file.getnames()

        # pre-install script
        if '_pre.py' in contents:
            logger.info('Running pre-installation script...')
            self.package_file.extract('_pre.py', self.tmp_folder)
            self._execute_script(self.tmp_folder + '/_pre.py',
                                 'Pre-installation script')
            os.remove(self.tmp_folder + '/_pre.py')

        # files and dirs
        for pfile in [x for x in contents if x[0] != '_']:
            logger.info('Extracting ' + pfile)
            self.package_file.extract(pfile)

        # published directories
        if '_pubdir.xml' in contents:
            logger.info('Installing published directories...')
            dirsfile = self.package_file.extractfile('_pubdir.xml')
            _dom = minidom.parse(dirsfile)
            dirsConfig = configfiles.PubDirManager()
            dir_nodes = _dom.getElementsByTagName('dir')
            for dir_node in dir_nodes:
                dir_node = dir_node.cloneNode(True)
                dir_name = dir_node.getAttribute('name')
                logger.info('Installing published directory "%s"' % dir_name)
                old_node = dirsConfig.getDirNode(dir_name)
                if old_node:
                    dirsConfig.replaceDirNode(dir_node, old_node)
                else:
                    dirsConfig.addDirNode(dir_node)
                # update published directories
                dir = pubdirs.Dir(dir_node)
                pubdirs.dirs[dir_name] = dir
                # add published directory in multi-processing environments
                services.notify(('ADD_PUBDIR', (dir_name, dir)))

            _dom.unlink()
            dirsConfig.close(True)

        # database
        dbfiles = [x for x in contents if x[:4] == '_db/']
        if dbfiles:

            @db.transactional(auto_commit=True)
            def _import_items():
                for dbfile in dbfiles:
                    logger.info('Importing object ' + os.path.basename(dbfile))
                    fn = '%s/%s' % (self.tmp_folder, dbfile)
                    self.package_file.extract(dbfile, self.tmp_folder)
                    # context manager replaces the manual try/finally close
                    with open(fn, 'rb') as objfile:
                        self._import_item(objfile)

            # import objects
            try:
                _import_items()
            finally:
                if os.path.exists(self.tmp_folder + '/_db'):
                    shutil.rmtree(self.tmp_folder + '/_db', True)

        # post-install script
        if '_post.py' in contents:
            logger.info('Running post-installation script...')
            self.package_file.extract('_post.py', self.tmp_folder)
            self._execute_script(self.tmp_folder + '/_post.py',
                                 'Post-installation script')
            os.remove(self.tmp_folder + '/_post.py')
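
The db.transactional decorator itself is not shown in these excerpts; the sketch below is an assumption about its shape, consistent with the DBRetryTransaction retries in example #5 (db, exceptions, and the transaction wiring belong to the surrounding Porcupine modules):

import functools

def transactional(auto_commit=False):
    # hypothetical retry wrapper: open a transaction, rerun the
    # function whenever deadlock detection raises DBRetryTransaction
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            while True:
                txn = db._db.get_transaction()
                try:
                    result = func(*args, **kwargs)
                    if auto_commit:
                        txn.commit()
                    return result
                except exceptions.DBRetryTransaction:
                    continue  # the victim was already aborted; retry
        return wrapper
    return decorator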