Example #1
def upload_folder(folder: str, parent_id: str, overwr: bool, force: bool, exclude: list) -> int:
    if parent_id is None:
        parent_id = query.get_root_id()
    parent = query.get_node(parent_id)

    real_path = os.path.realpath(folder)
    short_nm = os.path.basename(real_path)

    curr_node = parent.get_child(short_nm)
    if not curr_node or curr_node.status == 'TRASH' or parent.status == 'TRASH':
        try:
            r = content.create_folder(short_nm, parent_id)
            sync.insert_node(r)
            curr_node = query.get_node(r['id'])
        except RequestError as e:
            if e.status_code == 409:
                logger.error('Folder "%s" already exists. Please sync.' % short_nm)
            else:
                logger.error('Error creating remote folder "%s".' % short_nm)
            return ERR_CR_FOLDER
    elif curr_node.is_file():
        logger.error('Cannot create remote folder "%s", because a file of the same name already exists.' % short_nm)
        return ERR_CR_FOLDER

    entries = sorted(os.listdir(folder))

    ret_val = 0
    for entry in entries:
        full_path = os.path.join(real_path, entry)
        ret_val |= upload(full_path, curr_node.id, overwr, force, exclude)

    return ret_val
Example #2
    def testPurge(self):
        root = gen_folder()
        file = gen_file([root])

        sync.insert_nodes([root, file])
        self.assertEqual(query.get_node_count(), 2)
        self.assertIsInstance(query.get_node(file['id']), db.File)

        sync.remove_purged([file['id']])
        self.assertIsNone(query.get_node(file['id']))
        self.assertEqual(query.get_node_count(), 1)
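
The cache tests above and below rely on gen_folder/gen_file fixture helpers that are not shown in these examples. A minimal sketch of what such helpers might return, inferred only from the keys the tests actually index ('id', 'parents'); every other field name here is an assumption and the real helpers in the test suite may differ:

import uuid


def gen_folder(parents=None):
    # hypothetical fixture: a node dict shaped like the data sync.insert_node() consumes
    return {'id': str(uuid.uuid4()),  # the tests index this key
            'kind': 'FOLDER',         # assumed field name
            'status': 'AVAILABLE',    # assumed field name
            'name': 'folder',         # assumed field name
            'parents': [p['id'] for p in (parents or [])]}  # the tests index/append this key


def gen_file(parents=None):
    # hypothetical fixture: same dict shape, marked as a file
    node = gen_folder(parents)
    node.update(kind='FILE', name='file')
    return node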
Example #4
def upload_file(path: str, parent_id: str, overwr: bool, force: bool) -> int:
    short_nm = os.path.basename(path)

    cached_file = query.get_node(parent_id).get_child(short_nm)
    file_id = None
    if cached_file:
        file_id = cached_file.id

    if not file_id:
        try:
            hasher = hashing.Hasher(path)
            r = content.upload_file(path, parent_id)
            sync.insert_node(r)
            file_id = r['id']
            cached_file = query.get_node(file_id)
            return compare_hashes(hasher.get_result(), cached_file.md5, short_nm)

        except RequestError as e:
            if e.status_code == 409:  # might happen if cache is outdated
                hasher.stop()
                logger.error('Uploading "%s" failed. Name collision with non-cached file. '
                             'If you want to overwrite, please sync and try again.' % short_nm)
                # colliding node ID is returned in error message -> could be used to continue
                return UL_DL_FAILED
            elif e.status_code == 504 or e.status_code == 408:  # proxy timeout / request timeout
                hasher.stop()
                logger.warning('Timeout while uploading "%s".' % short_nm)
                # TODO: wait; request parent folder's children
                return UL_TIMEOUT
            else:
                hasher.stop()
                logger.error('Uploading "%s" failed. Code: %s, msg: %s' % (short_nm, e.status_code, e.msg))
                return UL_DL_FAILED

    # else: file exists
    mod_time = (cached_file.modified - datetime.datetime(1970, 1, 1)) / datetime.timedelta(seconds=1)

    logger.info('Remote mtime: ' + str(mod_time) + ', local mtime: ' + str(os.path.getmtime(path))
                + ', local ctime: ' + str(os.path.getctime(path)))

    if not overwr and not force:
        print('Skipping upload of existing file "%s".' % short_nm)
        return 0

    # ctime is checked because files can be overwritten by files with older mtime
    if mod_time < os.path.getmtime(path) \
            or (mod_time < os.path.getctime(path) and cached_file.size != os.path.getsize(path)) \
            or force:
        return overwrite(file_id, path)
    elif not force:
        print('Skipping upload of "%s" because of mtime or ctime and size.' % short_nm)
        return 0
    else:
        hasher = hashing.Hasher(path)
Example #5
    def rename(self, old, new):
        if old == new:
            return

        logger.debug('rename %s %s' % (old, new))

        id = query.resolve_path(old)
        if not id:
            raise FuseOSError(errno.ENOENT)

        new_bn, old_bn = os.path.basename(new), os.path.basename(old)
        new_dn, old_dn = os.path.dirname(new), os.path.dirname(old)

        existing_id = query.resolve_path(new)
        if existing_id:
            en = query.get_node(existing_id)
            if en and en.is_file() and en.size == 0:
                trash.move_to_trash(existing_id)
            else:
                raise FuseOSError(errno.EEXIST)

        if new_bn != old_bn:
            self._rename(id, new_bn)

        if new_dn != old_dn:
            odir_id = query.resolve_path(old_dn)
            ndir_id = query.resolve_path(new_dn)
            if not odir_id or not ndir_id:
                raise FuseOSError(errno.ENOTDIR)
            self._move(id, odir_id, ndir_id)
Example #6
def download_folder(node_id: str, local_path: str, exclude: list) -> int:
    if not local_path:
        local_path = os.getcwd()

    node = query.get_node(node_id)

    if node.name is None:
        curr_path = os.path.join(local_path, 'acd')
    else:
        curr_path = os.path.join(local_path, node.name)

    print('Current path: %s' % curr_path)
    try:
        os.makedirs(curr_path, exist_ok=True)
    except OSError:
        logger.error('Error creating directory "%s".' % curr_path)
        return ERR_CR_FOLDER

    children = sorted(node.children)
    ret_val = 0
    for child in children:
        if child.status != 'AVAILABLE':
            continue
        if child.is_file():
            ret_val |= download(child.id, curr_path, exclude)
        elif child.is_folder():
            ret_val |= download_folder(child.id, curr_path, exclude)

    return ret_val
Example #7
def download(node_id: str, local_path: str, exclude: list) -> int:
    node = query.get_node(node_id)

    if not node.is_available():
        return 0

    if node.is_folder():
        return download_folder(node_id, local_path, exclude)

    loc_name = node.name

    # downloading a non-cached node:
    # if not loc_name:
    #     loc_name = node_id

    for reg in exclude:
        if re.match(reg, loc_name):
            print('Skipping download of "%s" because of exclusion pattern.' % loc_name)
            return 0

    hasher = hashing.IncrementalHasher()

    try:
        print('Current file: %s' % loc_name)
        content.download_file(node_id, loc_name, local_path, length=node.size, write_callback=hasher.update)
    except RequestError as e:
        logger.error('Downloading "%s" failed. Code: %s, msg: %s' % (loc_name, e.status_code, e.msg))
        return UL_DL_FAILED

    return compare_hashes(hasher.get_result(), node.md5, loc_name)
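
The exclusion check in download() matches each node name against every regex in the exclude list with re.match. A self-contained sketch of that logic, with purely illustrative patterns (the helper name is not part of the shown code):

import re


def is_excluded(name: str, exclude: list) -> bool:
    # hypothetical helper mirroring the loop in download():
    # a node is skipped when any exclusion regex matches its name
    return any(re.match(reg, name) for reg in exclude)


# purely illustrative patterns
patterns = [r'.*\.part$', r'.*~$']
print(is_excluded('movie.mkv.part', patterns))  # True
print(is_excluded('notes.txt', patterns))       # False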
Example #8
    def rename(self, old, new):
        if old == new:
            return

        id = query.resolve_path(old, False)
        if not id:
            raise FuseOSError(errno.ENOENT)

        new_bn, old_bn = os.path.basename(new), os.path.basename(old)
        new_dn, old_dn = os.path.dirname(new), os.path.dirname(old)

        existing_id = query.resolve_path(new, False)
        if existing_id:
            en = query.get_node(existing_id)
            if en and en.is_file():
                trash.move_to_trash(existing_id)
            else:
                raise FuseOSError(errno.EEXIST)

        if new_bn != old_bn:
            self._rename(id, new_bn)

        if new_dn != old_dn:
            odir_id = query.resolve_path(old_dn, False)
            ndir_id = query.resolve_path(new_dn, False)
            if not odir_id or not ndir_id:
                raise FuseOSError(errno.ENOTDIR)
            self._move(id, odir_id, ndir_id)
Example #10
    def testInsertFile(self):
        root = gen_folder()
        sync.insert_node(root)
        file = gen_file([root])
        sync.insert_node(file)
        n = query.get_node(file['id'])
        self.assertEqual(len(n.parents), 1)
        self.assertEqual(query.get_node_count(), 2)
Example #12
    def testMultiParentNode(self):
        root = gen_folder()
        folder = gen_folder([root])
        file = gen_file([root])
        file['parents'].append(folder['id'])
        self.assertEqual(len(file['parents']), 2)

        sync.insert_nodes([root, folder, file])
        self.assertEqual(query.get_node_count(), 3)
        self.assertEqual(len(query.get_node(file['id']).parents), 2)
Example #14
def overwrite(node_id, local_file) -> int:
    hasher = hashing.Hasher(local_file)
    try:
        r = content.overwrite_file(node_id, local_file)
        sync.insert_node(r)
        node = query.get_node(r['id'])
        return compare_hashes(node.md5, hasher.get_result(), local_file)
    except RequestError as e:
        hasher.stop()
        logger.error('Error overwriting file. Code: %s, msg: %s' % (e.status_code, e.msg))
        return UL_DL_FAILED
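
overwrite(), upload_file() and download() all funnel their result through a compare_hashes() helper that is not shown in these examples. A minimal sketch of what it plausibly does, inferred from the call sites (two MD5 hex digests plus a display name, 0 on a match); the constant name and log wording are assumptions:

import logging

logger = logging.getLogger(__name__)

HASH_MISMATCH = 16  # assumed return code; the real module defines its own constants


def compare_hashes(md5_a: str, md5_b: str, name: str) -> int:
    # hypothetical reconstruction: 0 on matching digests, an error code otherwise
    if md5_a != md5_b:
        logger.error('Hash mismatch for "%s": %s != %s' % (name, md5_a, md5_b))
        return HASH_MISMATCH
    return 0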
Example #16
    def getattr(self, path, fh=None):
        id = query.resolve_path(path, trash=False)
        node = query.get_node(id)
        if not node:
            raise FuseOSError(errno.ENOENT)

        times = dict(st_atime=time(),
                     st_mtime=(node.modified - datetime(1970, 1, 1)) / timedelta(seconds=1),
                     st_ctime=(node.created - datetime(1970, 1, 1)) / timedelta(seconds=1))

        if node.is_folder():
            return dict(st_mode=stat.S_IFDIR | 0o7777, **times)
        if node.is_file():
            return dict(st_mode=stat.S_IFREG | 0o6667, st_size=node.size, **times)
Example #18
    def testFileMovement(self):
        root = gen_folder()
        folder = gen_folder([root])
        self.assertNotEqual(root['id'], folder['id'])

        file = gen_file([root])
        sync.insert_nodes([root, file])
        n = query.get_node(file['id'])
        self.assertEqual(n.parents[0].id, root['id'])

        file['parents'] = [folder['id']]
        sync.insert_nodes([folder, file])
        self.assertEqual(n.parents[0].id, folder['id'])

        self.assertEqual(len(n.parents), 1)
        self.assertEqual(query.get_node_count(), 3)
Example #20
    def getattr(self, path, fh=None):
        logger.debug('getattr %s' % path)

        id = query.resolve_path(path, trash=False)
        node = query.get_node(id)
        if not node:
            raise FuseOSError(errno.ENOENT)

        times = dict(st_atime=time(),
                     st_mtime=(node.modified - datetime(1970, 1, 1)) / timedelta(seconds=1),
                     st_ctime=(node.created - datetime(1970, 1, 1)) / timedelta(seconds=1))

        if node.is_folder():
            nlinks = 1 + len(node.parents)
            for c in node.children:
                if c.is_folder() and c.is_available():
                    nlinks += 1
            return dict(st_mode=stat.S_IFDIR | 0o0777, st_nlink=nlinks, **times)
        if node.is_file():
            return dict(st_mode=stat.S_IFREG | 0o0666,
                        st_nlink=len(node.parents), st_size=node.size, **times)
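
Both getattr variants convert the node's datetime fields to Unix epoch seconds with the same (dt - datetime(1970, 1, 1)) / timedelta(seconds=1) expression. Factored out as a tiny helper for clarity; the helper name is not part of the shown code and it assumes naive UTC datetimes, as the inline expression does:

from datetime import datetime, timedelta


def to_epoch_seconds(dt: datetime) -> float:
    # hypothetical helper: naive UTC datetime -> seconds since the Unix epoch,
    # the same expression used inline for st_mtime/st_ctime above
    return (dt - datetime(1970, 1, 1)) / timedelta(seconds=1)


# e.g. to_epoch_seconds(datetime(1970, 1, 2)) == 86400.0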
Example #21
    def action(args: argparse.Namespace) -> int:
        import subprocess
        import logging
        import sys
        from acdcli.api import metadata
        from acdcli.cache import query

        logger = logging.getLogger(__name__)

        n = query.get_node(args.node)
        r = metadata.get_metadata(args.node)
        try:
            link = r['tempLink']
        except KeyError:
            logger.critical('Could not get temporary URL for "%s".' % n.simple_name())
            return 1

        if sys.platform == 'linux':
            subprocess.call(['mimeopen', '--no-ask', link + '#' + n.simple_name()])
        else:
            logger.critical('OS not supported.')
            return 1
Example #24
    def testInsertFolder(self):
        folder = gen_folder()
        sync.insert_node(folder)
        n = query.get_node(folder['id'])
        self.assertEqual(n.id, folder['id'])
        self.assertEqual(query.get_node_count(), 1)