def test_download(self):
    """Round-trip test: upload a random file, download it back, verify MD5."""
    fn, sz = gen_rand_file()
    # file must be small enough to be fetched in a single consecutive request
    self.assertTrue(sz < content.CONSECUTIVE_DL_LIMIT)
    md5 = hashing.hash_file(fn)
    node = content.upload_file(fn)
    self.assertIn('id', node)
    os.remove(fn)
    self.assertFalse(os.path.exists(fn))
    content.download_file(node['id'], fn)
    self.assertEqual(md5, hashing.hash_file(fn))
    trash.move_to_trash(node['id'])
    os.remove(fn)
def test_download_chunked(self):
    """Download a file spanning several chunks and verify size and MD5."""
    chunk_size = gen_rand_sz()
    content.CHUNK_SIZE = chunk_size
    # five chunks guarantees the chunked code path is exercised
    fn, sz = gen_rand_file(size=5 * chunk_size)
    md5 = hashing.hash_file(fn)
    node = content.upload_file(fn)
    self.assertEqual(node['contentProperties']['md5'], md5)
    os.remove(fn)
    self.assertFalse(os.path.exists(fn))
    with open(fn, 'wb') as f:
        content.chunked_download(node['id'], f, length=sz)
    trash.move_to_trash(node['id'])
    dl_md5 = hashing.hash_file(fn)
    self.assertEqual(sz, os.path.getsize(fn))
    self.assertEqual(md5, dl_md5)
    os.remove(fn)
def test_upload(self):
    """Upload a random file and check the returned node's size and MD5."""
    fn, sz = gen_rand_file()
    md5 = hashing.hash_file(fn)
    node = content.upload_file(fn)
    self.assertIn('id', node)
    self.assertEqual(node['contentProperties']['size'], sz)
    self.assertEqual(node['contentProperties']['md5'], md5)
    trash.move_to_trash(node['id'])
    os.remove(fn)
def test_download_resume(self):
    """Simulate an interrupted download, then resume it and verify the MD5."""
    chunk_size = gen_rand_sz()
    content.CHUNK_SIZE = chunk_size
    content.CONSECUTIVE_DL_LIMIT = chunk_size
    fn, sz = gen_rand_file(size=5 * chunk_size)
    md5 = hashing.hash_file(fn)
    node = content.upload_file(fn)
    self.assertEqual(node['contentProperties']['md5'], md5)
    os.remove(fn)
    self.assertFalse(os.path.exists(fn))
    partial = fn + content.PARTIAL_SUFFIX
    # write only a random-length prefix to emulate an aborted transfer
    with open(partial, 'wb') as f:
        content.chunked_download(node['id'], f, length=int(sz * random.random()))
    self.assertLess(os.path.getsize(partial), sz)
    content.download_file(node['id'], fn)
    trash.move_to_trash(node['id'])
    self.assertEqual(md5, hashing.hash_file(fn))
    os.remove(fn)
def test_download_resume(self):
    """Resume an interrupted download via the acd_client API and verify MD5."""
    chunk_size = gen_rand_sz()
    content.CHUNK_SIZE = chunk_size
    content.CONSECUTIVE_DL_LIMIT = chunk_size
    tmp, sz = gen_temp_file(size=5 * chunk_size)
    md5 = hashing.hash_file(tmp.name)
    node = self.acd_client.upload_file(tmp.name)
    self.assertEqual(node["contentProperties"]["md5"], md5)
    tmp.close()
    basename = os.path.basename(tmp.name)
    # temp file is deleted on close; the download recreates it in the cwd
    self.assertFalse(os.path.exists(tmp.name))
    partial = basename + content.PARTIAL_SUFFIX
    with open(partial, "wb") as f:
        self.acd_client.chunked_download(node["id"], f,
                                         length=int(sz * random.random()))
    self.assertLess(os.path.getsize(partial), sz)
    self.acd_client.download_file(node["id"], basename)
    self.acd_client.move_to_trash(node["id"])
    self.assertEqual(md5, hashing.hash_file(basename))
    os.remove(basename)
def test_download_resume(self):
    """Resume an interrupted download with a config-driven chunk size."""
    chunk_size = gen_rand_sz()
    # chunk size is read from the client config in this variant
    self.acd_client._conf['transfer']['dl_chunk_size'] = str(chunk_size)
    tmp, sz = gen_temp_file(size=5 * chunk_size)
    md5 = hashing.hash_file(tmp.name)
    node = self.acd_client.upload_file(tmp.name)
    self.assertEqual(node['contentProperties']['md5'], md5)
    tmp.close()
    basename = os.path.basename(tmp.name)
    # temp file is deleted on close; the download recreates it in the cwd
    self.assertFalse(os.path.exists(tmp.name))
    partial = basename + content.PARTIAL_SUFFIX
    with open(partial, 'wb') as f:
        self.acd_client.chunked_download(node['id'], f,
                                         length=int(sz * random.random()))
    self.assertLess(os.path.getsize(partial), sz)
    self.acd_client.download_file(node['id'], basename)
    self.acd_client.move_to_trash(node['id'])
    self.assertEqual(md5, hashing.hash_file(basename))
    os.remove(basename)
def test_download(self):
    """Upload a temp file, download it by node id, and verify the MD5.

    Fix: remove the re-downloaded file at the end so the test leaves no
    artifact on disk (the gen_rand_file-based sibling test cleans up).
    """
    f, sz = gen_temp_file()
    self.assertTrue(sz < content.CONSECUTIVE_DL_LIMIT)
    md5 = hashing.hash_file_obj(f)
    n = self.acd_client.upload_file(f.name)
    self.assertIn('id', n)
    f.close()
    self.assertFalse(os.path.exists(f.name))
    self.acd_client.download_file(n['id'], f.name)
    md5_dl = hashing.hash_file(f.name)
    self.assertEqual(md5, md5_dl)
    self.acd_client.move_to_trash(n['id'])
    os.remove(f.name)  # clean up the downloaded copy
def test_download(self):
    """Upload a temp file, download it by node id, and verify the MD5.

    The size must fit the configured dl_chunk_size so a single request
    suffices. Fix: remove the re-downloaded file at the end so the test
    leaves no artifact on disk.
    """
    f, sz = gen_temp_file()
    self.assertTrue(
        sz < self.acd_client._conf.getint('transfer', 'dl_chunk_size'))
    md5 = hashing.hash_file_obj(f)
    n = self.acd_client.upload_file(f.name)
    self.assertIn('id', n)
    f.close()
    self.assertFalse(os.path.exists(f.name))
    self.acd_client.download_file(n['id'], f.name)
    md5_dl = hashing.hash_file(f.name)
    self.assertEqual(md5, md5_dl)
    self.acd_client.move_to_trash(n['id'])
    os.remove(f.name)  # clean up the downloaded copy
def test_incomplete_download(self):
    """Requesting more bytes than the file holds must raise INCOMPLETE_RESULT.

    Fix: trash the uploaded node when done — the original version never
    removed it (the corrected twin of this test does call move_to_trash).
    """
    ch_sz = gen_rand_sz()
    content.CHUNK_SIZE = ch_sz
    fn, sz = gen_rand_file(size=5 * ch_sz)
    md5 = hashing.hash_file(fn)
    n = content.upload_file(fn)
    self.assertEqual(n['contentProperties']['md5'], md5)
    os.remove(fn)
    self.assertFalse(os.path.exists(fn))
    # asking for one byte beyond the real size must fail with a partial file
    with self.assertRaises(RequestError) as cm:
        content.download_file(n['id'], fn, length=sz + 1)
    self.assertEqual(cm.exception.status_code,
                     RequestError.CODE.INCOMPLETE_RESULT)
    # the exact size succeeds (and consumes the partial file)
    content.download_file(n['id'], fn, length=sz)
    trash.move_to_trash(n['id'])  # fix: remote node was leaked before
    os.remove(fn)
def test_incomplete_download(self):
    """Requesting more bytes than the file holds must raise INCOMPLETE_RESULT."""
    chunk_size = gen_rand_sz()
    content.CHUNK_SIZE = chunk_size
    fn, sz = gen_rand_file(size=5 * chunk_size)
    md5 = hashing.hash_file(fn)
    node = content.upload_file(fn)
    self.assertEqual(node['contentProperties']['md5'], md5)
    os.remove(fn)
    self.assertFalse(os.path.exists(fn))
    # asking for one byte beyond the real size must fail
    with self.assertRaises(RequestError) as cm:
        content.download_file(node['id'], fn, length=sz + 1)
    self.assertEqual(cm.exception.status_code,
                     RequestError.CODE.INCOMPLETE_RESULT)
    # the exact size succeeds
    content.download_file(node['id'], fn, length=sz)
    trash.move_to_trash(node['id'])
    os.remove(fn)
def upload_file(path: str, parent_id: str, overwr: bool, force: bool, dedup: bool,
                pg_handler: progress.FileProgress = None) -> RetryRetVal:
    """Upload the file at *path* into the remote folder *parent_id*.

    :param overwr: overwrite an existing remote file of the same name
    :param force: upload even when mtime/ctime suggest the remote copy is newer
    :param dedup: skip the transfer when a file with the same MD5 and size
        already exists remotely
    :param pg_handler: optional progress tracker; fix: the original
        dereferenced it unconditionally although it defaults to None

    Returns a status value (DUPLICATE, NAME_COLLISION, CACHE_ASYNC,
    UL_TIMEOUT, UL_DL_FAILED, 0 on skip, or the result of a hash comparison /
    overwrite).
    """
    short_nm = os.path.basename(path)

    if dedup and cache.file_size_exists(os.path.getsize(path)):
        nodes = list(format.PathFormatter(cache.find_md5(hashing.hash_file(path))))
        if nodes:
            # identical content already present remotely; no transfer needed
            logger.info('Location of duplicates: %s' % nodes)
            if pg_handler:
                pg_handler.done()
            return DUPLICATE

    conflicting_node = cache.conflicting_node(short_nm, parent_id)
    file_id = None
    if conflicting_node:
        if conflicting_node.is_folder():
            logger.error('Name collision with existing folder '
                         'in the same location: "%s".' % short_nm)
            return NAME_COLLISION
        file_id = conflicting_node.id

    if not file_id:
        logger.info('Uploading %s' % path)
        hasher = hashing.IncrementalHasher()
        # stream callbacks: always hash; report progress only if a handler was given
        read_callbacks = [hasher.update]
        if pg_handler:
            read_callbacks.append(pg_handler.update)
        try:
            r = acd_client.upload_file(path, parent_id,
                                       read_callbacks=read_callbacks,
                                       deduplication=dedup)
        except RequestError as e:
            if e.status_code == 409:  # might happen if cache is outdated
                if not dedup:
                    logger.error('Uploading "%s" failed. Name collision with non-cached file. '
                                 'If you want to overwrite, please sync and try again.'
                                 % short_nm)
                else:
                    logger.error(
                        'Uploading "%s" failed. '
                        'Name or hash collision with non-cached file.' % short_nm)
                logger.info(e)
                # colliding node ID is returned in error message -> could be used to continue
                return CACHE_ASYNC
            elif e.status_code == 504 or e.status_code == 408:  # proxy timeout / request timeout
                logger.warning('Timeout while uploading "%s".' % short_nm)
                # TODO: wait; request parent folder's children
                return UL_TIMEOUT
            else:
                logger.error(
                    'Uploading "%s" failed. %s.' % (short_nm, str(e)))
                return UL_DL_FAILED
        else:
            cache.insert_node(r)
            file_id = r['id']
            md5 = cache.get_node(file_id).md5
            return compare_hashes(hasher.get_result(), md5, short_nm)

    # else: file exists
    if not overwr and not force:
        logger.info('Skipping upload of existing file "%s".' % short_nm)
        if pg_handler:
            pg_handler.done()
        return 0

    # convert the remote timestamp to a naive UTC datetime for comparison
    rmod = (conflicting_node.modified - datetime(1970, 1, 1)) / timedelta(seconds=1)
    rmod = datetime.utcfromtimestamp(rmod)
    lmod = datetime.utcfromtimestamp(os.path.getmtime(path))
    lcre = datetime.utcfromtimestamp(os.path.getctime(path))
    logger.debug('Remote mtime: %s, local mtime: %s, local ctime: %s'
                 % (rmod, lmod, lcre))

    # ctime is checked because files can be overwritten by files with older mtime
    if rmod < lmod or (rmod < lcre and conflicting_node.size != os.path.getsize(path)) \
            or force:
        return overwrite(file_id, path, dedup=dedup, pg_handler=pg_handler).ret_val
    elif not force:
        logger.info('Skipping upload of "%s" because of mtime or ctime and size.'
                    % short_nm)
        if pg_handler:
            pg_handler.done()
        return 0