Example #1
    def __init__(self,
                 cache_path: str = '',
                 settings_path='',
                 check=IntegrityCheckType['full']):
        self._conf = get_conf(settings_path, _SETTINGS_FILENAME, _def_conf)

        self.db_path = os.path.join(cache_path,
                                    self._conf['sqlite']['filename'])
        self.tl = local()

        self.integrity_check(check)
        self.init()

        # Register a REGEXP function with SQLite so queries can use
        # `<column> REGEXP <pattern>`; the arity is read off _regex_match itself.
        self._conn.create_function('REGEXP', _regex_match.__code__.co_argcount,
                                   _regex_match)

        with cursor(self._conn) as c:
            c.execute(_ROOT_ID_SQL)
            row = c.fetchone()
            if not row:
                self.root_id = ''
                return
            first_id = row['id']

            if c.fetchone():
                raise IntegrityError('Could not uniquely identify root node.')

            self.root_id = first_id

        self._execute_pragma('busy_timeout',
                             self._conf['sqlite']['busy_timeout'])
        self._execute_pragma('journal_mode',
                             self._conf['sqlite']['journal_mode'])
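A minimal usage sketch of this constructor. The class is `NodeCache` in acd_cli's db module (Example #6 below constructs it the same way); the import path and the directory choices here are assumptions:

import appdirs
from acdcli.cache import db  # assumed module path for acd_cli's cache

cache_path = appdirs.user_cache_dir('acd_cli')
settings_path = appdirs.user_config_dir('acd_cli')

# 'full' is the default integrity check; spelled out here for clarity.
cache = db.NodeCache(cache_path, settings_path,
                     check=db.IntegrityCheckType['full'])
print(cache.root_id)  # empty string until a sync has written a root node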
Example #2
File: db.py Project: pep1/acd_cli
    def __init__(self, cache_path: str='', settings_path='', check=IntegrityCheckType['full']):
        self._conf = get_conf(settings_path, _SETTINGS_FILENAME, _def_conf)

        self.db_path = os.path.join(cache_path, self._conf['sqlite']['filename'])
        self.tl = local()

        self.integrity_check(check)
        self.init()

        self._conn.create_function('REGEXP', _regex_match.__code__.co_argcount, _regex_match)

        self.path_to_node_id = {}
        self.path_to_node_id_lock = Lock()
        """There are a huge number of repeated path lookups,
        so cache results and invalidate on new nodes."""

        with cursor(self._conn) as c:
            c.execute(_ROOT_ID_SQL)
            row = c.fetchone()
            if not row:
                self.root_id = ''
                return
            first_id = row['id']

            if c.fetchone():
                raise IntegrityError('Could not uniquely identify root node.')

            self.root_id = first_id

        self._execute_pragma('busy_timeout', self._conf['sqlite']['busy_timeout'])
        self._execute_pragma('journal_mode', self._conf['sqlite']['journal_mode'])
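Example #2 extends Example #1 with a lock-guarded `path_to_node_id` memo; per its docstring, path lookups repeat heavily, so results are cached and invalidated when new nodes appear. A standalone sketch of that pattern (class and method names are illustrative, not acd_cli's API):

from threading import Lock

class PathCache:
    """Memo of path -> node id lookups, dropped wholesale on writes."""

    def __init__(self):
        self._map = {}
        self._lock = Lock()

    def lookup(self, path, resolve):
        with self._lock:
            if path in self._map:
                return self._map[path]
        node_id = resolve(path)  # the expensive DB query, outside the lock
        with self._lock:
            self._map[path] = node_id
        return node_id

    def invalidate(self):
        # A new node can change any path resolution, so clear everything.
        with self._lock:
            self._map.clear()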
Example #3
    def remove_db_file(cls, cache_path='', settings_path='') -> bool:
        """Removes database file."""

        import os
        import random
        import string
        import tempfile

        conf = get_conf(settings_path, _SETTINGS_FILENAME, _def_conf)
        db_path = os.path.join(cache_path, conf['sqlite']['filename'])

        tmp_name = ''.join(
            random.choice(string.ascii_lowercase) for _ in range(16))
        tmp_name = os.path.join(tempfile.gettempdir(), tmp_name)

        try:
            os.rename(db_path, tmp_name)
        except OSError:
            logger.critical('Error renaming/removing database file "%s".' %
                            db_path)
            return False
        else:
            try:
                os.remove(tmp_name)
            except OSError:
                logger.info('Database file was moved, but not deleted.')
        return True
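The `cls` parameter indicates `remove_db_file` is a classmethod (its decorator falls outside the excerpt). Renaming before deleting means that even if the final unlink fails, the file is already out of the cache directory and a fresh database can be created there. A hypothetical call site:

from acdcli.cache import db  # assumed module path, as above

if db.NodeCache.remove_db_file(cache_path='/tmp/acd_cache'):
    print('cache database gone; the next init starts from an empty schema')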
Example #4
    def remove_db_file(cls, cache_path='', settings_path='') -> bool:
        """Removes database file."""

        import os
        import random
        import string
        import tempfile

        conf = get_conf(settings_path, _SETTINGS_FILENAME, _def_conf)
        db_path = os.path.join(cache_path, conf['sqlite']['filename'])

        tmp_name = ''.join(random.choice(string.ascii_lowercase) for _ in range(16))
        tmp_name = os.path.join(tempfile.gettempdir(), tmp_name)

        try:
            os.rename(db_path, tmp_name)
        except OSError:
            logger.critical('Error renaming/removing database file "%s".' % db_path)
            return False
        else:
            try:
                os.remove(tmp_name)
            except OSError:
                logger.info('Database file was moved, but not deleted.')
        return True
Example #5
    def __init__(self, cache_path: str='', settings_path='', check=IntegrityCheckType['full']):
        self._conf = get_conf(settings_path, _SETTINGS_FILENAME, _def_conf)

        self.db_path = os.path.join(cache_path, self._conf['sqlite']['filename'])
        self.tl = local()

        self.integrity_check(check)
        try:
            self.init()
        except sqlite3.DatabaseError as e:
            raise IntegrityError(e)

        self._conn.create_function('REGEXP', _regex_match.__code__.co_argcount, _regex_match)

        with cursor(self._conn) as c:
            c.execute(_ROOT_ID_SQL)
            row = c.fetchone()
            if not row:
                self.root_id = ''
                return
            first_id = row['id']

            if c.fetchone():
                raise IntegrityError('Could not uniquely identify root node.')

            self.root_id = first_id

        self._execute_pragma('busy_timeout', self._conf['sqlite']['busy_timeout'])
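        # CPython 3.6.0's sqlite3 module had a regression (fixed in 3.6.1) that
        # made statements such as this pragma run inside an implicit transaction,
        # where the journal mode cannot be changed; hence the version guard.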
        if sys.version_info[:3] != (3, 6, 0):
            self._execute_pragma('journal_mode', self._conf['sqlite']['journal_mode'])
Example #6
        def __init__(self, storage_url, login, password, options):
            # Unused argument
            #pylint: disable=W0613

            super().__init__()

            if not storage_url.startswith("acd://"):
                raise QuietError('Invalid storage URL', exitcode=2)

            with Backend._static_lock:
                if Backend._acd_client is None:
                    # acd_cli path settings copied from acd_cli
                    _app_name = 'acd_cli'
                    cp = os.environ.get('ACD_CLI_CACHE_PATH')
                    sp = os.environ.get('ACD_CLI_SETTINGS_PATH')

                    CACHE_PATH = cp if cp else appdirs.user_cache_dir(
                        _app_name)
                    SETTINGS_PATH = sp if sp else appdirs.user_config_dir(
                        _app_name)

                    _SETTINGS_FILENAME = _app_name + '.ini'

                    def_conf = ConfigParser()
                    def_conf['download'] = dict(keep_corrupt=False,
                                                keep_incomplete=True)
                    def_conf['upload'] = dict(timeout_wait=10)

                    Backend._acd_conf = get_conf(SETTINGS_PATH,
                                                 _SETTINGS_FILENAME, def_conf)

                    Backend._acd_client = client.ACDClient(
                        CACHE_PATH, SETTINGS_PATH)

                    Backend._acd_cache = db.NodeCache(CACHE_PATH,
                                                      SETTINGS_PATH)

                    Backend.acd_client_owner = Backend._acd_cache.KeyValueStorage.get(
                        CacheConsts.OWNER_ID)

                self.parent_node_id = Backend._acd_cache.get_root_node().id

            self.path = storage_url[6:].strip("/")

            self._create_rootdir()
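The path resolution above follows acd_cli's own convention: an environment variable wins, otherwise the appdirs default applies. Condensed into a helper (the function name is illustrative):

import os
import appdirs

def acd_cli_paths(app_name: str = 'acd_cli'):
    """Resolve (cache_path, settings_path); environment variables take precedence."""
    cache = os.environ.get('ACD_CLI_CACHE_PATH') or appdirs.user_cache_dir(app_name)
    settings = os.environ.get('ACD_CLI_SETTINGS_PATH') or appdirs.user_config_dir(app_name)
    return cache, settings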
Example #7
    def __init__(self, cache_path='', settings_path=''):
        """Initializes OAuth and endpoints."""

        self._conf = get_conf(settings_path, _SETTINGS_FILENAME, _def_conf)

        self.cache_path = cache_path
        logger.info('Initializing ACD with path "%s".' % cache_path)

        self.handler = oauth.create_handler(cache_path)

        self._endpoint_data = {}
        self._load_endpoints()

        requests_timeout = (self._conf.getint('transfer', 'connection_timeout'),
                            self._conf.getint('transfer', 'idle_timeout'))
        proxies = dict(self._conf['proxies'])

        self.BOReq = BackOffRequest(self.handler, requests_timeout, proxies)
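The two-element `requests_timeout` tuple follows the requests library's `(connect, read)` timeout convention, which `BackOffRequest` presumably passes through to requests. How such a tuple is interpreted:

import requests

# (connect timeout, read timeout) in seconds; requests accepts a 2-tuple.
timeout = (30, 60)
resp = requests.get('https://example.com/endpoint', timeout=timeout)  # placeholder URL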
Example #8
    def __init__(self, cache_path='', settings_path=''):
        """Initializes OAuth and endpoints."""

        self._conf = get_conf(settings_path, _SETTINGS_FILENAME, _def_conf)

        self.cache_path = cache_path
        logger.info('Initializing ACD with path "%s".' % cache_path)

        self.handler = oauth.create_handler(cache_path)

        self._endpoint_data = {}
        self._load_endpoints()

        requests_timeout = (self._conf.getint('transfer',
                                              'connection_timeout'),
                            self._conf.getint('transfer', 'idle_timeout'))
        proxies = dict(self._conf['proxies'])

        self.BOReq = BackOffRequest(self.handler, requests_timeout, proxies)
Example #9
def mount(path: str, args: dict, **kwargs) -> 'Union[int, None]':
    """Fusermounts Amazon Cloud Drive to specified mountpoint.

    :raises: RuntimeError
    :param args: args to pass on to ACDFuse init
    :param kwargs: fuse mount options as described in :manpage:`fuse(8)`"""

    if not os.path.isdir(path):
        logger.critical('Mountpoint does not exist or already used.')
        return 1

    opts = dict(auto_cache=True, sync_read=True)
    if sys.platform == 'linux':
        opts['big_writes'] = True

    kwargs.update(opts)

    args['conf'] = get_conf(args['settings_path'], _SETTINGS_FILENAME, _def_conf)

    FUSE(ACDFuse(**args), path, subtype=ACDFuse.__name__, **kwargs)
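A hypothetical invocation: `args` must carry at least the `settings_path` key read above plus whatever `ACDFuse.__init__` expects, and the keyword arguments become fuse(8) mount options (`foreground` and `nothreads` are standard fusepy options):

ret = mount('/mnt/acd', {'settings_path': ''}, foreground=True, nothreads=True)
if ret == 1:
    print('mountpoint missing or busy')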
Example #10
def mount(path: str, args: dict, **kwargs) -> 'Union[int, None]':
    """Fusermounts Amazon Cloud Drive to specified mountpoint.

    :raises: RuntimeError
    :param args: args to pass on to ACDFuse init
    :param kwargs: fuse mount options as described in :manpage:`fuse(8)`"""

    if not os.path.isdir(path):
        logger.critical('Mountpoint does not exist or already used.')
        return 1

    opts = dict(auto_cache=True, sync_read=True)
    if sys.platform == 'linux':
        opts['big_writes'] = True

    kwargs.update(opts)

    args['conf'] = get_conf(args['settings_path'], _SETTINGS_FILENAME,
                            _def_conf)

    FUSE(ACDFuse(**args), path, subtype=ACDFuse.__name__, **kwargs)