Example #1
 def test_upload(self):
     fn, sz = gen_rand_file()
     md5 = hashing.hash_file(fn)
     n = content.upload_file(fn)
     self.assertIn('id', n)
     self.assertEqual(n['contentProperties']['size'], sz)
     self.assertEqual(n['contentProperties']['md5'], md5)
     n = trash.move_to_trash(n['id'])
     os.remove(fn)
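
This test leans on a few helpers that are not shown above. Here is a minimal sketch of what they might look like, inferred purely from how the tests call them (gen_rand_file returning a (name, size) pair, hashing.hash_file returning a hex MD5 digest); the real implementations may differ:

# Hypothetical test helpers, inferred from usage in the tests.
import hashlib
import os
import random
import string


def gen_rand_nm(length=16):
    # random file name (assumed signature)
    return ''.join(random.choice(string.ascii_lowercase) for _ in range(length))


def gen_rand_file(size=1024):
    # create a file of random bytes, return (name, size)
    nm = gen_rand_nm()
    with open(nm, 'wb') as f:
        f.write(os.urandom(size))
    return nm, size


def hash_file(path):
    # chunked MD5, matching what hashing.hash_file presumably returns
    md5 = hashlib.md5()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(8192), b''):
            md5.update(chunk)
    return md5.hexdigest()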
Example #2
 def test_overwrite(self):
     fn = gen_rand_nm()
     open(fn, 'wb').close()
     n = content.upload_file(fn)
     self.assertIn('id', n)
     n = content.overwrite_file(n['id'], fn)
     self.assertEqual(n['contentProperties']['version'], 2)
     trash.move_to_trash(n['id'])
     os.remove(fn)
Example #3
def upload_file(path: str, parent_id: str, overwr: bool, force: bool) -> int:
    short_nm = os.path.basename(path)

    cached_file = query.get_node(parent_id).get_child(short_nm)
    file_id = None
    if cached_file:
        file_id = cached_file.id

    if not file_id:
        try:
            hasher = hashing.Hasher(path)
            r = content.upload_file(path, parent_id)
            sync.insert_node(r)
            file_id = r['id']
            cached_file = query.get_node(file_id)
            return compare_hashes(hasher.get_result(), cached_file.md5, short_nm)

        except RequestError as e:
            if e.status_code == 409:  # might happen if cache is outdated
                hasher.stop()
                logger.error('Uploading "%s" failed. Name collision with non-cached file. '
                             'If you want to overwrite, please sync and try again.' % short_nm)
                # colliding node ID is returned in error message -> could be used to continue
                return UL_DL_FAILED
            elif e.status_code == 504 or e.status_code == 408:  # proxy timeout / request timeout
                hasher.stop()
                logger.warning('Timeout while uploading "%s".' % short_nm)
                # TODO: wait; request parent folder's children
                return UL_TIMEOUT
            else:
                hasher.stop()
                logger.error('Uploading "%s" failed. Code: %s, msg: %s' % (short_nm, e.status_code, e.msg))
                return UL_DL_FAILED

    # else: file exists
    mod_time = (cached_file.modified - datetime.datetime(1970, 1, 1)) / datetime.timedelta(seconds=1)

    logger.info('Remote mtime: ' + str(mod_time) + ', local mtime: ' + str(os.path.getmtime(path))
                + ', local ctime: ' + str(os.path.getctime(path)))

    if not overwr and not force:
        print('Skipping upload of existing file "%s".' % short_nm)
        return 0

    # ctime is checked because files can be overwritten by files with older mtime
    if mod_time < os.path.getmtime(path) \
            or (mod_time < os.path.getctime(path) and cached_file.size != os.path.getsize(path)) \
            or force:
        return overwrite(file_id, path)
    elif not force:
        print('Skipping upload of "%s" because of mtime or ctime and size.' % short_nm)
        return 0
    else:
        # NB: this branch is unreachable: it requires force to be True,
        # but then the 'if ... or force' branch above was already taken
        hasher = hashing.Hasher(path)
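
upload_file references compare_hashes and the UL_DL_FAILED / UL_TIMEOUT return codes without showing them. A plausible sketch, assuming the codes are simple integer constants and compare_hashes returns 0 on a match (both inferred from usage, not taken from the original module):

import logging

logger = logging.getLogger(__name__)

# Assumed integer return codes; the actual values are not shown in the source.
UL_DL_FAILED = 1
UL_TIMEOUT = 2


def compare_hashes(local_md5, remote_md5, file_name):
    # 0 signals success, mirroring the 'return 0' paths in upload_file
    if local_md5 == remote_md5:
        return 0
    logger.error('Hash mismatch for "%s".' % file_name)
    return UL_DL_FAILED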
Example #4
 def test_download(self):
     fn, sz = gen_rand_file()
     self.assertTrue(sz < content.CONSECUTIVE_DL_LIMIT)
     md5 = hashing.hash_file(fn)
     n = content.upload_file(fn)
     self.assertIn('id', n)
     os.remove(fn)
     self.assertFalse(os.path.exists(fn))
     content.download_file(n['id'], fn)
     md5_dl = hashing.hash_file(fn)
     self.assertEqual(md5, md5_dl)
     trash.move_to_trash(n['id'])
     os.remove(fn)
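
The assertion on CONSECUTIVE_DL_LIMIT suggests that download_file fetches small files in a single request and falls back to chunked_download above that limit. A rough sketch of that dispatch, not the library's actual implementation (fetch_all is a hypothetical one-shot request helper):

def download_file_sketch(node_id, path, size):
    # assumed dispatch logic, inferred from the test's size assertion
    with open(path, 'wb') as f:
        if size < content.CONSECUTIVE_DL_LIMIT:
            f.write(fetch_all(node_id))  # hypothetical single request
        else:
            content.chunked_download(node_id, f, length=size)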
Example #5
 def test_download_chunked(self):
     ch_sz = gen_rand_sz()
     content.CHUNK_SIZE = ch_sz
     fn, sz = gen_rand_file(size=5 * ch_sz)
     md5 = hashing.hash_file(fn)
     n = content.upload_file(fn)
     self.assertEqual(n['contentProperties']['md5'], md5)
     os.remove(fn)
     self.assertFalse(os.path.exists(fn))
     f = io.BytesIO()
     content.chunked_download(n['id'], f, length=sz)
     trash.move_to_trash(n['id'])
     dl_md5 = hashing.hash_bytes(f)
     self.assertEqual(sz, f.tell())
     self.assertEqual(md5, dl_md5)
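
hashing.hash_bytes is applied to an in-memory BytesIO buffer here. A minimal sketch of such a function, assuming it hashes from the start of the buffer and restores the position afterwards (which would keep the sz == f.tell() assertion valid):

import hashlib


def hash_bytes(f):
    # hash the whole buffer, then restore the caller's position
    pos = f.tell()
    f.seek(0)
    md5 = hashlib.md5()
    for chunk in iter(lambda: f.read(8192), b''):
        md5.update(chunk)
    f.seek(pos)
    return md5.hexdigest()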
Example #6
    def test_incomplete_download(self):
        ch_sz = gen_rand_sz()
        content.CHUNK_SIZE = ch_sz
        fn, sz = gen_rand_file(size=5 * ch_sz)
        md5 = hashing.hash_file(fn)
        n = content.upload_file(fn)
        self.assertEqual(n['contentProperties']['md5'], md5)
        os.remove(fn)
        self.assertFalse(os.path.exists(fn))
        with self.assertRaises(RequestError) as cm:
            content.download_file(n['id'], fn, length=sz + 1)

        #os.remove(fn + content.PARTIAL_SUFFIX)
        self.assertEqual(cm.exception.status_code, RequestError.CODE.INCOMPLETE_RESULT)
        content.download_file(n['id'], fn, length=sz)
        os.remove(fn)
Example #7
    def test_incomplete_download(self):
        ch_sz = gen_rand_sz()
        content.CHUNK_SIZE = ch_sz
        fn, sz = gen_rand_file(size=5 * ch_sz)
        md5 = hashing.hash_file(fn)
        n = content.upload_file(fn)
        self.assertEqual(n['contentProperties']['md5'], md5)
        os.remove(fn)
        self.assertFalse(os.path.exists(fn))
        with self.assertRaises(RequestError) as cm:
            content.download_file(n['id'], fn, length=sz + 1)

        #os.remove(fn + content.PARTIAL_SUFFIX)
        self.assertEqual(cm.exception.status_code,
                         RequestError.CODE.INCOMPLETE_RESULT)
        content.download_file(n['id'], fn, length=sz)
        trash.move_to_trash(n['id'])
        os.remove(fn)
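
The test asks for one byte more than the file holds and expects a RequestError carrying CODE.INCOMPLETE_RESULT, while the commented-out cleanup line hints that a PARTIAL_SUFFIX file is left behind. A sketch of the kind of length check that would produce this behavior (the RequestError constructor arguments shown are guesses):

import os


def finalize_download(part_path, expected_length):
    # assumed shape of the check; keep the partial file so a retry can resume
    actual = os.path.getsize(part_path)
    if actual != expected_length:
        raise RequestError(RequestError.CODE.INCOMPLETE_RESULT,
                           'got %d of %d bytes' % (actual, expected_length))
    os.rename(part_path, part_path[:-len(content.PARTIAL_SUFFIX)])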
Example #8
 def test_download_resume(self):
     ch_sz = gen_rand_sz()
     content.CHUNK_SIZE = ch_sz
     content.CONSECUTIVE_DL_LIMIT = ch_sz
     fn, sz = gen_rand_file(size=5 * ch_sz)
     md5 = hashing.hash_file(fn)
     n = content.upload_file(fn)
     self.assertEqual(n['contentProperties']['md5'], md5)
     os.remove(fn)
     self.assertFalse(os.path.exists(fn))
     p_fn = fn + content.PARTIAL_SUFFIX
     with open(p_fn, 'wb') as f:
         content.chunked_download(n['id'], f, length=int(sz * random.random()))
     self.assertLess(os.path.getsize(p_fn), sz)
     content.download_file(n['id'], fn)
     trash.move_to_trash(n['id'])
     dl_md5 = hashing.hash_file(fn)
     self.assertEqual(md5, dl_md5)
     os.remove(fn)
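
A resume presumably works by reusing the PARTIAL_SUFFIX file: measure how much is already on disk, continue the transfer from that offset, and rename on completion. A sketch under those assumptions (the offset keyword on chunked_download is not confirmed by the examples):

import os


def resume_download_sketch(node_id, path, total_size):
    part = path + content.PARTIAL_SUFFIX
    offset = os.path.getsize(part) if os.path.exists(part) else 0
    with open(part, 'ab') as f:
        # 'offset' is an assumed parameter, e.g. backed by an HTTP Range header
        content.chunked_download(node_id, f, offset=offset, length=total_size)
    os.rename(part, path)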
Example #9
def upload_file(path: str, parent_id: str, overwr: bool, force: bool, dedup: bool,
                pg_handler: progress.FileProgress=None) -> RetryRetVal:
    short_nm = os.path.basename(path)

    logger.info('Uploading %s' % path)

    cached_file = query.get_node(parent_id).get_child(short_nm)
    file_id = None
    if cached_file:
        file_id = cached_file.id

    if not file_id:
        if dedup and query.file_size_exists(os.path.getsize(path)):
            nodes = query.find_md5(hashing.hash_file(path))
            nodes = format.PathFormatter(nodes)
            if len(nodes) > 0:
                # print('Skipping upload of duplicate file "%s".' % short_nm)
                logger.info('Location of duplicates: %s' % nodes)
                pg_handler.done()
                return DUPLICATE

        try:
            hasher = hashing.IncrementalHasher()
            r = content.upload_file(path, parent_id,
                                    read_callbacks=[hasher.update, pg_handler.update],
                                    deduplication=dedup)
            sync.insert_node(r)
            file_id = r['id']
            md5 = query.get_node(file_id).md5
            return compare_hashes(hasher.get_result(), md5, short_nm)

        except RequestError as e:
            if e.status_code == 409:  # might happen if cache is outdated
                if not dedup:
                    logger.error('Uploading "%s" failed. Name collision with non-cached file. '
                                 'If you want to overwrite, please sync and try again.' % short_nm)
                else:
                    logger.error(
                        'Uploading "%s" failed. Name or hash collision with non-cached file.' % short_nm)
                    logger.info(e)
                # colliding node ID is returned in error message -> could be used to continue
                return CACHE_ASYNC
            elif e.status_code == 504 or e.status_code == 408:  # proxy timeout / request timeout
                logger.warning('Timeout while uploading "%s".' % short_nm)
                # TODO: wait; request parent folder's children
                return UL_TIMEOUT
            else:
                logger.error(
                    'Uploading "%s" failed. Code: %s, msg: %s' % (short_nm, e.status_code, e.msg))
                return UL_DL_FAILED

    # else: file exists
    rmod = (cached_file.modified - datetime(1970, 1, 1)) / timedelta(seconds=1)
    rmod = datetime.utcfromtimestamp(rmod)
    lmod = datetime.utcfromtimestamp(os.path.getmtime(path))
    lcre = datetime.utcfromtimestamp(os.path.getctime(path))

    logger.info('Remote mtime: %s, local mtime: %s, local ctime: %s' % (rmod, lmod, lcre))

    if not overwr and not force:
        logger.info('Skipping upload of existing file "%s".' % short_nm)
        pg_handler.done()
        return 0

    # ctime is checked because files can be overwritten by files with older mtime
    if rmod < lmod or (rmod < lcre and cached_file.size != os.path.getsize(path)) \
            or force:
        return overwrite(file_id, path, dedup=dedup, pg_handler=pg_handler).ret_val
    elif not force:
        logger.info('Skipping upload of "%s" because of mtime or ctime and size.' % short_nm)
        pg_handler.done()
        return 0
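
Unlike the hashing.Hasher used in Example #3 (which needs an explicit stop()), this version hashes on the fly: IncrementalHasher.update is registered as a read callback so the MD5 is computed while the bytes are uploaded. A minimal sketch consistent with that usage; only the interface used above (update, get_result) is taken from the source:

import hashlib


class IncrementalHasher:
    def __init__(self):
        self._md5 = hashlib.md5()

    def update(self, chunk):
        # invoked as a read callback for each uploaded chunk
        self._md5.update(chunk)

    def get_result(self):
        return self._md5.hexdigest()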
Example #10
def upload_file(path: str, parent_id: str, overwr: bool, force: bool, dedup: bool,
                pg_handler: progress.FileProgress=None) -> RetryRetVal:
    short_nm = os.path.basename(path)

    if dedup and query.file_size_exists(os.path.getsize(path)):
        nodes = query.find_md5(hashing.hash_file(path))
        nodes = [n for n in format.PathFormatter(nodes)]
        if len(nodes) > 0:
            # print('Skipping upload of duplicate file "%s".' % short_nm)
            logger.info('Location of duplicates: %s' % nodes)
            pg_handler.done()
            return DUPLICATE

    conflicting_node = query.conflicting_node(short_nm, parent_id)
    file_id = None
    if conflicting_node:
        if conflicting_node.is_folder():
            logger.error('Name collision with existing folder '
                         'in the same location: "%s".' % short_nm)
            return NAME_COLLISION

        file_id = conflicting_node.id

    if not file_id:
        logger.info('Uploading %s' % path)
        hasher = hashing.IncrementalHasher()
        try:
            r = content.upload_file(path, parent_id,
                                    read_callbacks=[hasher.update, pg_handler.update],
                                    deduplication=dedup)
        except RequestError as e:
            if e.status_code == 409:  # might happen if cache is outdated
                if not dedup:
                    logger.error('Uploading "%s" failed. Name collision with non-cached file. '
                                 'If you want to overwrite, please sync and try again.' % short_nm)
                else:
                    logger.error(
                        'Uploading "%s" failed. '
                        'Name or hash collision with non-cached file.' % short_nm)
                    logger.info(e)
                # colliding node ID is returned in error message -> could be used to continue
                return CACHE_ASYNC
            elif e.status_code == 504 or e.status_code == 408:  # proxy timeout / request timeout
                logger.warning('Timeout while uploading "%s".' % short_nm)
                # TODO: wait; request parent folder's children
                return UL_TIMEOUT
            else:
                logger.error(
                    'Uploading "%s" failed. Code: %s, msg: %s' % (short_nm, e.status_code, e.msg))
                return UL_DL_FAILED
        else:
            sync.insert_node(r)
            file_id = r['id']
            md5 = query.get_node(file_id).md5
            return compare_hashes(hasher.get_result(), md5, short_nm)

    # else: file exists
    if not overwr and not force:
        logger.info('Skipping upload of existing file "%s".' % short_nm)
        pg_handler.done()
        return 0

    rmod = (conflicting_node.modified - datetime(1970, 1, 1)) / timedelta(seconds=1)
    rmod = datetime.utcfromtimestamp(rmod)
    lmod = datetime.utcfromtimestamp(os.path.getmtime(path))
    lcre = datetime.utcfromtimestamp(os.path.getctime(path))

    logger.debug('Remote mtime: %s, local mtime: %s, local ctime: %s' % (rmod, lmod, lcre))

    # ctime is checked because files can be overwritten by files with older mtime
    if rmod < lmod or (rmod < lcre and conflicting_node.size != os.path.getsize(path)) \
            or force:
        return overwrite(file_id, path, dedup=dedup, pg_handler=pg_handler).ret_val
    elif not force:
        logger.info('Skipping upload of "%s" because of mtime or ctime and size.' % short_nm)
        pg_handler.done()
        return 0
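
The timestamp logic shared by Examples #9 and #10 can be read as one predicate: overwrite when the local file is newer, or when it was replaced by a file with an older mtime (caught via ctime plus a size mismatch), or when forced. Isolated as a helper sketch, assuming node.modified is a naive UTC datetime as the conversion implies:

import os
from datetime import datetime, timedelta


def needs_overwrite(node, path, force=False):
    # convert the remote datetime to epoch seconds and back to naive UTC
    rmod = datetime.utcfromtimestamp(
        (node.modified - datetime(1970, 1, 1)) / timedelta(seconds=1))
    lmod = datetime.utcfromtimestamp(os.path.getmtime(path))
    lcre = datetime.utcfromtimestamp(os.path.getctime(path))
    # ctime is checked because a file can be overwritten by one with an older mtime
    return force or rmod < lmod or \
        (rmod < lcre and node.size != os.path.getsize(path))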