def insert_local_state(info, parent_path): nonlocal marker if info.name == "File.txt" and not marker: local.rename("/File.txt", "Renamed File.txt") sleep(2) marker = True EngineDAO.insert_local_state(self.engine_1._dao, info, parent_path)
def update_remote_state(row, *args, **kwargs): nonlocal marker EngineDAO.update_remote_state(self.engine_1.dao, row, *args, **kwargs) if not marker and row.local_name == "File.txt": self.engine_1.local.rename(row.local_path, "Renamed File.txt") marker = True
def insert_local_state(info, parent_path):
    """Rename 'File.txt' the first time it shows up, then insert normally."""
    global marker
    if not marker and info.name == 'File.txt':
        self.local_client_1.rename('/File.txt', 'Renamed File.txt')
        sleep(2)
        marker = True
    # Delegate to the genuine DAO method in every case.
    EngineDAO.insert_local_state(self.engine_1._dao, info, parent_path)
def update_remote_state(row, *args, **kwargs): nonlocal marker EngineDAO.update_remote_state(self.engine_1.dao, row, *args, **kwargs) if row.local_name == "New Folder" and not marker: root_local.rename(row.local_path, "Renamed Folder") marker = True
def insert_local_state(info, parent_path): nonlocal marker if info.name == "File.txt" and not marker: local.rename("/File.txt", "Renamed File.txt") sleep(2) marker = True EngineDAO.insert_local_state(self.engine_1.dao, info, parent_path)
def update_remote_state(row, info, remote_parent_path=None, versionned=True):
    """Run the real update, then rename 'File.txt' once on first sight."""
    global marker
    EngineDAO.update_remote_state(
        self.engine_1._dao, row, info, remote_parent_path, versionned)
    if not marker and row.local_name == 'File.txt':
        root_local_client.rename(row.local_path, 'Renamed File.txt')
        sleep(5)
        marker = True
def update_remote_state(row, info, remote_parent_path=None, versionned=True,
                        queue=True, force_update=False, no_digest=False):
    """Delegate with every flag intact, then rename 'File.txt' exactly once."""
    global marker
    EngineDAO.update_remote_state(
        self.engine_1._dao,
        row,
        info,
        remote_parent_path=remote_parent_path,
        versionned=versionned,
        queue=queue,
        force_update=force_update,
        no_digest=no_digest)
    if not marker and row.local_name == 'File.txt':
        root_local_client.rename(row.local_path, 'Renamed File.txt')
        marker = True
def update_remote_state(row, info, remote_parent_path=None, versionned=True,
                        queue=True, force_update=False):
    """After the genuine update, rename 'New Folder' a single time."""
    global marker
    forwarded = dict(remote_parent_path=remote_parent_path,
                     versionned=versionned,
                     queue=queue,
                     force_update=force_update)
    EngineDAO.update_remote_state(self.engine_1._dao, row, info, **forwarded)
    if row.local_name == 'New Folder' and not marker:
        root_local_client.rename(row.local_path, 'Renamed Folder')
        sleep(5)
        marker = True
def setUp(self):
    """Copy the reference engine DB into a temp file and open a DAO on it.

    Fix: the source fixture was opened without a context manager, leaking
    the file handle; both files are now managed by ``with``.
    """
    self.build_workspace = os.environ.get('WORKSPACE')
    self.tmpdir = None
    if self.build_workspace is not None:
        self.tmpdir = os.path.join(self.build_workspace, "tmp")
        if not os.path.isdir(self.tmpdir):
            os.makedirs(self.tmpdir)
    self.tmp_db = self.get_db_temp_file()
    with open(self._get_default_db(), 'rb') as db, \
            open(self.tmp_db.name, 'wb') as f:
        f.write(db.read())
    self._dao = EngineDAO(self.tmp_db.name)
def setUp(self):
    """Prepare a throw-away copy of the reference DB and a DAO over it."""
    workspace = os.environ.get('WORKSPACE', '')
    self.tmpdir = os.path.join(workspace, 'tmp')
    self.addCleanup(clean_dir, self.tmpdir)
    if not os.path.isdir(self.tmpdir):
        os.makedirs(self.tmpdir)
    self.tmp_db = self.get_db_temp_file()
    with open(self._get_default_db(), 'rb') as source:
        with open(self.tmp_db.name, 'wb') as target:
            target.write(source.read())
    self._dao = EngineDAO(self.tmp_db.name)
    self.addCleanup(self._clean_dao, self._dao)
def test_init_db(self):
    """A DAO created on a non-existing path starts with empty tables."""
    init_db = self.get_db_temp_file()
    if sys.platform != 'win32':
        # Let EngineDAO create the database file from scratch.
        os.remove(init_db.name)
    dao = EngineDAO(init_db.name)
    # Filters table starts empty
    self.assertEqual(len(dao.get_filters()), 0)
    # State table has no conflicts
    self.assertEqual(len(dao.get_conflicts()), 0)
    # No configuration stored yet
    self.assertIsNone(dao.get_config("remote_user"))
    # RemoteScan table is empty
    self.assertFalse(dao.is_path_scanned("/"))
    self._clean_dao(dao)
def test_migration_db_v1_with_duplicates(self):
    """Migrating a v1 DB that contains duplicates empties the States table.

    Fixes: the fixture file was opened without a context manager (handle
    leak) and the deprecated ``assertEquals`` alias was used.
    """
    # Test a non empty db
    migrate_db = self.get_db_temp_file()
    with open(self._get_default_db('test_engine_migration_duplicate.db'),
              'rb') as db, open(migrate_db.name, 'wb') as f:
        f.write(db.read())
    self._dao = EngineDAO(migrate_db.name)
    c = self._dao._get_read_connection().cursor()
    rows = c.execute("SELECT * FROM States").fetchall()
    self.assertEqual(len(rows), 0)
    cols = c.execute("PRAGMA table_info('States')").fetchall()
    self.assertEqual(len(cols), 30)
    self.assertIsNone(self._dao.get_config("remote_last_event_log_id"))
    self.assertIsNone(self._dao.get_config("remote_last_full_scan"))
def update_remote_state(row, info, remote_parent_path=None, versionned=True,
                        queue=True, force_update=False):
    """One-shot hook: after the DAO update, rename 'New Folder' locally."""
    global marker
    EngineDAO.update_remote_state(self.engine_1._dao,
                                  row,
                                  info,
                                  remote_parent_path=remote_parent_path,
                                  versionned=versionned,
                                  queue=queue,
                                  force_update=force_update)
    pending = row.local_name == 'New Folder' and not marker
    if pending:
        root_local_client.rename(row.local_path, 'Renamed Folder')
        sleep(5)
        marker = True
def update_remote_state(row, info, remote_parent_path=None, versionned=True,
                        queue=True, force_update=False, no_digest=False):
    """Pass every option through, then rename 'File.txt' exactly once."""
    global marker
    options = dict(remote_parent_path=remote_parent_path,
                   versionned=versionned,
                   queue=queue,
                   force_update=force_update,
                   no_digest=no_digest)
    EngineDAO.update_remote_state(self.engine_1._dao, row, info, **options)
    if not marker and row.local_name == 'File.txt':
        root_local_client.rename(row.local_path, 'Renamed File.txt')
        marker = True
def test_migration_db_v1(self):
    """Migrate a v1 engine DB (empty then populated) and re-run the suite.

    Fixes: fixture handle leak (no ``with``) and deprecated ``assertEquals``.
    """
    init_db = self.get_db_temp_file()
    # Test empty db
    dao = EngineDAO(init_db.name)
    self._clean_dao(dao)
    # Test a non empty db
    migrate_db = self.get_db_temp_file()
    with open(self._get_default_db('test_engine_migration.db'),
              'rb') as db, open(migrate_db.name, 'wb') as f:
        f.write(db.read())
    self._dao = EngineDAO(migrate_db.name)
    c = self._dao._get_read_connection().cursor()
    cols = c.execute("PRAGMA table_info('States')").fetchall()
    self.assertEqual(len(cols), 30)
    cols = c.execute("SELECT * FROM States").fetchall()
    self.assertEqual(len(cols), 63)
    # Re-run the functional tests against the migrated database.
    self.test_batch_folder_files()
    self.test_batch_upload_files()
    self.test_conflicts()
    self.test_errors()
    self.test_acquire_processors()
    self.test_configuration()
def test_migration_db_v1_with_duplicates(self):
    """Migrating a v1 DB containing duplicates empties the States table.

    Fix: the fixture file was opened without a context manager and its
    handle leaked; both files are now managed by ``with``.
    """
    # Test a non empty db
    migrate_db = self.get_db_temp_file()
    with open(self._get_default_db('test_engine_migration_duplicate.db'),
              'rb') as db, open(migrate_db.name, 'wb') as f:
        f.write(db.read())
    self._dao = EngineDAO(migrate_db.name)
    c = self._dao._get_read_connection().cursor()
    rows = c.execute("SELECT * FROM States").fetchall()
    self.assertEqual(len(rows), 0)
    cols = c.execute("PRAGMA table_info('States')").fetchall()
    self.assertEqual(len(cols), 30)
    self.assertIsNone(self._dao.get_config("remote_last_event_log_id"))
    self.assertIsNone(self._dao.get_config("remote_last_full_scan"))
def test_init_db(self):
    """A DAO created on a non-existing path starts with empty tables.

    Fix: ``assertEquals`` is a deprecated alias removed in Python 3.12;
    use ``assertEqual``.
    """
    init_db = self.get_db_temp_file()
    if sys.platform != 'win32':
        # Let EngineDAO create the database file from scratch.
        os.remove(init_db.name)
    dao = EngineDAO(init_db.name)
    # Test filters table
    self.assertEqual(0, len(dao.get_filters()))
    # Test state table
    self.assertEqual(0, len(dao.get_conflicts()))
    # Test configuration
    self.assertIsNone(dao.get_config("remote_user"))
    # Test RemoteScan table
    self.assertFalse(dao.is_path_scanned("/"))
    self._clean_dao(dao)
def test_migration_db_v1(self):
    """Migrate a v1 engine DB (empty then populated) and re-run the suite.

    Fixes: fixture handle leak (no ``with``) and deprecated ``assertEquals``.
    """
    init_db = self.get_db_temp_file()
    # Test empty db
    dao = EngineDAO(init_db.name)
    self._clean_dao(dao)
    # Test a non empty db
    migrate_db = self.get_db_temp_file()
    with open(self._get_default_db('test_engine_migration.db'),
              'rb') as db, open(migrate_db.name, 'wb') as f:
        f.write(db.read())
    self._dao = EngineDAO(migrate_db.name)
    c = self._dao._get_read_connection().cursor()
    cols = c.execute("PRAGMA table_info('States')").fetchall()
    self.assertEqual(len(cols), 30)
    # Re-run the functional tests against the migrated database.
    self.test_batch_folder_files()
    self.test_batch_upload_files()
    self.test_conflicts()
    self.test_errors()
    self.test_acquire_processors()
    self.test_configuration()
def test_migration_db_v1(self):
    """Migrate an old v1 ``nxdrive.db`` and verify Manager/engine config.

    Fixes: SQL statements were built with ``%`` string interpolation,
    which breaks on quoted values and is injection-prone — replaced with
    parameterized queries; the fixture file handle was also leaked.
    """
    # Initialize old DB
    old_db = os.path.join(self.test_folder, 'nxdrive.db')
    with open(self._get_db('test_manager_migration.db'), 'rb') as db, \
            open(old_db, 'wb') as f:
        f.write(db.read())
    # Update token with one acquired against the test server
    conn = sqlite3.connect(old_db)
    c = conn.cursor()
    device_id = c.execute(
        "SELECT device_id FROM device_config LIMIT 1").fetchone()[0]
    remote_client = RemoteDocumentClientForTests(
        self.nuxeo_url, self.admin_user, device_id, nxdrive.__version__,
        password=self.admin_password)
    token = remote_client.request_token()
    # The fixture was recorded with this hard-coded local folder.
    old_folder = '/home/ataillefer/Nuxeo Drive'
    c.execute(
        "UPDATE server_bindings SET remote_token=? WHERE local_folder=?",
        (token, old_folder))
    # Update server URL with test server URL
    c.execute(
        "UPDATE server_bindings SET server_url=? WHERE local_folder=?",
        (self.nuxeo_url, old_folder))
    # Update local folder with test temp dir
    local_folder = os.path.join(self.test_folder, 'Nuxeo Drive')
    c.execute(
        "UPDATE server_bindings SET local_folder=? WHERE local_folder=?",
        (local_folder, old_folder))
    conn.commit()
    conn.close()
    # Create Manager with old DB migration
    manager = self._create_manager()
    self.addCleanup(manager.stop)
    dao = manager.get_dao()
    # Check Manager config
    self.assertEqual(dao.get_config('device_id'), device_id)
    self.assertEqual(dao.get_config('proxy_config'), 'Manual')
    self.assertEqual(dao.get_config('proxy_type'), 'http')
    self.assertEqual(dao.get_config('proxy_server'), 'proxy.server.com')
    self.assertEqual(dao.get_config('proxy_port'), '80')
    self.assertEqual(dao.get_config('proxy_authenticated'), '1')
    self.assertEqual(dao.get_config('proxy_username'), 'Administrator')
    self.assertEqual(dao.get_config('auto_update'), '1')
    self.assertEqual(dao.get_config('proxy_config'), 'Manual')
    # Check engine definition
    engines = dao.get_engines()
    self.assertEqual(len(engines), 1)
    engine = engines[0]
    self.assertEqual(engine.engine, 'NXDRIVE')
    self.assertEqual(engine.name, manager._get_engine_name(self.nuxeo_url))
    self.assertTrue(local_folder in engine.local_folder)
    # Check engine config
    engine_uid = engine.uid
    engine_db = os.path.join(self.test_folder, 'ndrive_%s.db' % engine_uid)
    engine_dao = EngineDAO(engine_db)
    self.assertEqual(engine_dao.get_config('server_url'), self.nuxeo_url)
    self.assertEqual(engine_dao.get_config('remote_user'), 'Administrator')
    self.assertEqual(engine_dao.get_config('remote_token'), token)
    engine_dao.dispose()
    manager.dispose_all()
def update_remote_state(row, *args, **kwargs): nonlocal marker EngineDAO.update_remote_state(self.engine_1._dao, row, *args, **kwargs) if row.local_name == "New Folder" and not marker: root_local.rename(row.local_path, "Renamed Folder") marker = True
class EngineDAOTest(unittest.TestCase):
    """Unit tests for EngineDAO against pre-populated SQLite fixtures.

    Fixes applied: the two migration tests opened the fixture database
    without a context manager (handle leak), and ``test_configuration``
    used the deprecated ``assertNotEquals`` alias.
    """

    def _get_default_db(self, name='test_engine.db'):
        # Fixture databases live next to the tests, under resources/.
        return os.path.join(os.path.dirname(__file__), 'resources', name)

    def _clean_dao(self, dao):
        # Dispose the DAO; on Windows the temp DB file was closed early
        # (see get_db_temp_file) so it must be removed by hand.
        dao.dispose()
        if sys.platform == 'win32':
            os.remove(dao.get_db())

    def get_db_temp_file(self):
        tmp_db = tempfile.NamedTemporaryFile(suffix="test_db", dir=self.tmpdir)
        if sys.platform == 'win32':
            # Windows cannot reopen a still-open NamedTemporaryFile.
            tmp_db.close()
        return tmp_db

    def setUp(self):
        self.tmpdir = os.path.join(os.environ.get('WORKSPACE', ''), 'tmp')
        self.addCleanup(clean_dir, self.tmpdir)
        if not os.path.isdir(self.tmpdir):
            os.makedirs(self.tmpdir)
        self.tmp_db = self.get_db_temp_file()
        with open(self._get_default_db(), 'rb') as db, \
                open(self.tmp_db.name, 'wb') as f:
            f.write(db.read())
        self._dao = EngineDAO(self.tmp_db.name)
        self.addCleanup(self._clean_dao, self._dao)

    def test_init_db(self):
        init_db = self.get_db_temp_file()
        if sys.platform != 'win32':
            os.remove(init_db.name)
        dao = EngineDAO(init_db.name)
        # Test filters table
        self.assertEqual(0, len(dao.get_filters()))
        # Test state table
        self.assertEqual(0, len(dao.get_conflicts()))
        # Test configuration
        self.assertIsNone(dao.get_config("remote_user"))
        # Test RemoteScan table
        self.assertFalse(dao.is_path_scanned("/"))
        self._clean_dao(dao)

    def test_migration_db_v1_with_duplicates(self):
        # Test a non empty db
        migrate_db = self.get_db_temp_file()
        with open(self._get_default_db('test_engine_migration_duplicate.db'),
                  'rb') as db, open(migrate_db.name, 'wb') as f:
            f.write(db.read())
        self._dao = EngineDAO(migrate_db.name)
        c = self._dao._get_read_connection().cursor()
        rows = c.execute("SELECT * FROM States").fetchall()
        self.assertEqual(len(rows), 0)
        cols = c.execute("PRAGMA table_info('States')").fetchall()
        self.assertEqual(len(cols), 30)
        self.assertIsNone(self._dao.get_config("remote_last_event_log_id"))
        self.assertIsNone(self._dao.get_config("remote_last_full_scan"))

    def test_migration_db_v1(self):
        init_db = self.get_db_temp_file()
        # Test empty db
        dao = EngineDAO(init_db.name)
        self._clean_dao(dao)
        # Test a non empty db
        migrate_db = self.get_db_temp_file()
        with open(self._get_default_db('test_engine_migration.db'),
                  'rb') as db, open(migrate_db.name, 'wb') as f:
            f.write(db.read())
        self._dao = EngineDAO(migrate_db.name)
        c = self._dao._get_read_connection().cursor()
        cols = c.execute("PRAGMA table_info('States')").fetchall()
        self.assertEqual(len(cols), 30)
        cols = c.execute("SELECT * FROM States").fetchall()
        self.assertEqual(len(cols), 63)
        # Re-run the functional tests against the migrated database.
        self.test_batch_folder_files()
        self.test_batch_upload_files()
        self.test_conflicts()
        self.test_errors()
        self.test_acquire_processors()
        self.test_configuration()

    def test_conflicts(self):
        self.assertEqual(self._dao.get_conflict_count(), 3)
        self.assertEqual(len(self._dao.get_conflicts()), 3)

    def test_errors(self):
        self.assertEqual(self._dao.get_error_count(), 1)
        self.assertEqual(self._dao.get_error_count(5), 0)
        self.assertEqual(len(self._dao.get_errors()), 1)
        row = self._dao.get_errors()[0]
        # Test reset error
        self._dao.reset_error(row)
        self.assertEqual(self._dao.get_error_count(), 0)
        row = self._dao.get_state_from_id(row.id)
        self.assertIsNone(row.last_error)
        self.assertIsNone(row.last_error_details)
        self.assertEqual(row.error_count, 0)
        # Test increase
        self._dao.increase_error(row, "Test")
        self.assertEqual(self._dao.get_error_count(), 0)
        self._dao.increase_error(row, "Test 2")
        self.assertEqual(self._dao.get_error_count(), 0)
        self.assertEqual(self._dao.get_error_count(1), 1)
        self._dao.increase_error(row, "Test 3")
        self.assertEqual(self._dao.get_error_count(), 0)
        self.assertEqual(self._dao.get_error_count(2), 1)
        # Synchronize with wrong version should fail
        self.assertFalse(self._dao.synchronize_state(row,
                                                     version=row.version - 1))
        self.assertEqual(self._dao.get_error_count(2), 1)
        # Synchronize should reset error
        self.assertTrue(self._dao.synchronize_state(row))
        self.assertEqual(self._dao.get_error_count(2), 0)

    def test_remote_scans(self):
        self.assertFalse(self._dao.is_path_scanned("/"))
        self._dao.add_path_scanned("/Test")
        self.assertTrue(self._dao.is_path_scanned("/Test"))
        self.assertFalse(self._dao.is_path_scanned("/Test2"))
        self._dao.clean_scanned()
        self.assertFalse(self._dao.is_path_scanned("/Test"))

    def test_last_sync(self):
        # Based only on file so not showing 2
        ids = [58, 8, 62, 61, 60]
        files = self._dao.get_last_files(5)
        self.assertEqual(len(files), 5)
        for i in range(5):
            self.assertEqual(files[i].id, ids[i])
        ids = [58, 62, 61, 60, 63]
        files = self._dao.get_last_files(5, "remote")
        self.assertEqual(len(files), 5)
        for i in range(5):
            self.assertEqual(files[i].id, ids[i])
        ids = [8, 11, 5]
        files = self._dao.get_last_files(5, "local")
        self.assertEqual(len(files), 3)
        for i in range(3):
            self.assertEqual(files[i].id, ids[i])

    def test_batch_folder_files(self):
        # Verify that the batch is ok
        ids = range(25, 47)
        index = 0
        state = self._dao.get_state_from_id(25)  # ids[index]
        while index < len(ids) - 1:
            index = index + 1
            state = self._dao.get_next_folder_file(state.remote_ref)
            self.assertEqual(state.id, ids[index])
        while index > 0:
            index = index - 1
            state = self._dao.get_previous_folder_file(state.remote_ref)
            self.assertEqual(state.id, ids[index])
        self.assertIsNone(self._dao.get_previous_folder_file(state.remote_ref))
        # Last file is 9
        state = self._dao.get_state_from_id(46)
        self.assertIsNone(self._dao.get_next_folder_file(state.remote_ref))

    def test_batch_upload_files(self):
        # Verify that the batch is ok
        ids = [58, 62, 61, 60, 63]
        index = 0
        state = self._dao.get_state_from_id(ids[index])
        while index < len(ids) - 1:
            index = index + 1
            state = self._dao.get_next_sync_file(state.remote_ref,
                                                 Engine.BATCH_MODE_UPLOAD)
            self.assertEqual(state.id, ids[index])
        while index > 0:
            index = index - 1
            state = self._dao.get_previous_sync_file(state.remote_ref,
                                                     Engine.BATCH_MODE_UPLOAD)
            self.assertEqual(state.id, ids[index])
        self.assertIsNone(self._dao.get_previous_sync_file(
            state.remote_ref, Engine.BATCH_MODE_UPLOAD))
        # Last file is 9
        state = self._dao.get_state_from_id(9)
        self.assertIsNone(self._dao.get_next_sync_file(
            state.remote_ref, Engine.BATCH_MODE_UPLOAD))

    def test_reinit_processors(self):
        state = self._dao.get_state_from_id(1)
        self.assertEqual(state.processor, 0)

    def test_acquire_processors(self):
        self.assertTrue(self._dao.acquire_processor(666, 2))
        # Cannot acquire processor if different processor
        self.assertFalse(self._dao.acquire_processor(777, 2))
        # Can re-acquire processor if same processor
        self.assertTrue(self._dao.acquire_processor(666, 2))
        self.assertTrue(self._dao.release_processor(666))
        # Check the auto-release
        self.assertTrue(self._dao.acquire_processor(666, 2))
        row = self._dao.get_state_from_id(2)
        self._dao.synchronize_state(row)
        self.assertFalse(self._dao.release_processor(666))

    def test_configuration(self):
        result = self._dao.get_config("empty", "DefaultValue")
        self.assertEqual(result, "DefaultValue")
        result = self._dao.get_config("remote_user", "DefaultValue")
        self.assertEqual(result, "Administrator")
        self._dao.update_config("empty", "notAnymore")
        result = self._dao.get_config("empty", "DefaultValue")
        self.assertNotEqual(result, "DefaultValue")
        self._dao.update_config("remote_user", "Test")
        result = self._dao.get_config("remote_user", "DefaultValue")
        self.assertEqual(result, "Test")
        self._dao.update_config("empty", None)
        result = self._dao.get_config("empty", "DefaultValue")
        self.assertEqual(result, "DefaultValue")
        result = self._dao.get_config("empty")
        self.assertEqual(result, None)

    def test_filters(self):
        # Contains by default /fakeFilter/Test_Parent and /fakeFilter/Retest
        self.assertEqual(len(self._dao.get_filters()), 2)
        self._dao.remove_filter(u"/fakeFilter/Retest")
        self.assertEqual(len(self._dao.get_filters()), 1)
        self._dao.add_filter(u"/fakeFilter")
        # Should delete the subchild filter
        self.assertEqual(len(self._dao.get_filters()), 1)
        self._dao.add_filter(u"/otherFilter")
        self.assertEqual(len(self._dao.get_filters()), 2)
class EngineDAOTest(unittest.TestCase):
    """Unit tests for EngineDAO against pre-populated SQLite fixtures.

    Fixes applied: ``setUp`` and both migration tests opened fixture
    databases without context managers (handle leaks); the deprecated
    ``assertEquals``/``assertNotEquals`` aliases (removed in Python 3.12)
    are replaced with ``assertEqual``/``assertNotEqual``.
    """

    def _get_default_db(self, name='test_engine.db'):
        # Fixture databases are shipped inside the nxdrive package.
        nxdrive_path = os.path.dirname(nxdrive.__file__)
        return os.path.join(nxdrive_path, 'tests', 'resources', name)

    def _clean_dao(self, dao):
        # Dispose the DAO; on Windows the temp DB file was closed early
        # (see get_db_temp_file) so it must be removed by hand.
        dao.dispose()
        if sys.platform == 'win32':
            os.remove(dao.get_db())

    def get_db_temp_file(self):
        tmp_db = tempfile.NamedTemporaryFile(suffix="test_db", dir=self.tmpdir)
        if sys.platform == 'win32':
            # Windows cannot reopen a still-open NamedTemporaryFile.
            tmp_db.close()
        return tmp_db

    def setUp(self):
        self.build_workspace = os.environ.get('WORKSPACE')
        self.tmpdir = None
        if self.build_workspace is not None:
            self.tmpdir = os.path.join(self.build_workspace, "tmp")
            if not os.path.isdir(self.tmpdir):
                os.makedirs(self.tmpdir)
        self.tmp_db = self.get_db_temp_file()
        with open(self._get_default_db(), 'rb') as db, \
                open(self.tmp_db.name, 'wb') as f:
            f.write(db.read())
        self._dao = EngineDAO(self.tmp_db.name)

    def tearDown(self):
        self._clean_dao(self._dao)
        if sys.platform == 'win32' and os.path.exists(self.tmp_db.name):
            os.remove(self.tmp_db.name)

    def test_init_db(self):
        init_db = self.get_db_temp_file()
        if sys.platform != 'win32':
            os.remove(init_db.name)
        dao = EngineDAO(init_db.name)
        # Test filters table
        self.assertEqual(0, len(dao.get_filters()))
        # Test state table
        self.assertEqual(0, len(dao.get_conflicts()))
        # Test configuration
        self.assertIsNone(dao.get_config("remote_user"))
        # Test RemoteScan table
        self.assertFalse(dao.is_path_scanned("/"))
        self._clean_dao(dao)

    def test_migration_db_v1_with_duplicates(self):
        # Test a non empty db
        migrate_db = self.get_db_temp_file()
        with open(self._get_default_db('test_engine_migration_duplicate.db'),
                  'rb') as db, open(migrate_db.name, 'wb') as f:
            f.write(db.read())
        self._dao = EngineDAO(migrate_db.name)
        c = self._dao._get_read_connection().cursor()
        rows = c.execute("SELECT * FROM States").fetchall()
        self.assertEqual(len(rows), 0)
        cols = c.execute("PRAGMA table_info('States')").fetchall()
        self.assertEqual(len(cols), 30)
        self.assertIsNone(self._dao.get_config("remote_last_event_log_id"))
        self.assertIsNone(self._dao.get_config("remote_last_full_scan"))

    def test_migration_db_v1(self):
        init_db = self.get_db_temp_file()
        # Test empty db
        dao = EngineDAO(init_db.name)
        self._clean_dao(dao)
        # Test a non empty db
        migrate_db = self.get_db_temp_file()
        with open(self._get_default_db('test_engine_migration.db'),
                  'rb') as db, open(migrate_db.name, 'wb') as f:
            f.write(db.read())
        self._dao = EngineDAO(migrate_db.name)
        c = self._dao._get_read_connection().cursor()
        cols = c.execute("PRAGMA table_info('States')").fetchall()
        self.assertEqual(len(cols), 30)
        cols = c.execute("SELECT * FROM States").fetchall()
        self.assertEqual(len(cols), 63)
        # Re-run the functional tests against the migrated database.
        self.test_batch_folder_files()
        self.test_batch_upload_files()
        self.test_conflicts()
        self.test_errors()
        self.test_acquire_processors()
        self.test_configuration()

    def test_conflicts(self):
        self.assertEqual(self._dao.get_conflict_count(), 3)
        self.assertEqual(len(self._dao.get_conflicts()), 3)

    def test_errors(self):
        self.assertEqual(self._dao.get_error_count(), 1)
        self.assertEqual(self._dao.get_error_count(5), 0)
        self.assertEqual(len(self._dao.get_errors()), 1)
        row = self._dao.get_errors()[0]
        # Test reset error
        self._dao.reset_error(row)
        self.assertEqual(self._dao.get_error_count(), 0)
        row = self._dao.get_state_from_id(row.id)
        self.assertIsNone(row.last_error)
        self.assertIsNone(row.last_error_details)
        self.assertEqual(row.error_count, 0)
        # Test increase
        self._dao.increase_error(row, "Test")
        self.assertEqual(self._dao.get_error_count(), 0)
        self._dao.increase_error(row, "Test 2")
        self.assertEqual(self._dao.get_error_count(), 0)
        self.assertEqual(self._dao.get_error_count(1), 1)
        self._dao.increase_error(row, "Test 3")
        self.assertEqual(self._dao.get_error_count(), 0)
        self.assertEqual(self._dao.get_error_count(2), 1)
        # Synchronize with wrong version should fail
        self.assertFalse(
            self._dao.synchronize_state(row, version=row.version - 1))
        self.assertEqual(self._dao.get_error_count(2), 1)
        # Synchronize should reset error
        self.assertTrue(self._dao.synchronize_state(row))
        self.assertEqual(self._dao.get_error_count(2), 0)

    def test_remote_scans(self):
        self.assertFalse(self._dao.is_path_scanned("/"))
        self._dao.add_path_scanned("/Test")
        self.assertTrue(self._dao.is_path_scanned("/Test"))
        self.assertFalse(self._dao.is_path_scanned("/Test2"))
        self._dao.clean_scanned()
        self.assertFalse(self._dao.is_path_scanned("/Test"))

    def test_last_sync(self):
        # Based only on file so not showing 2
        ids = [58, 8, 62, 61, 60]
        files = self._dao.get_last_files(5)
        self.assertEqual(len(files), 5)
        for i in range(5):
            self.assertEqual(files[i].id, ids[i])
        ids = [58, 62, 61, 60, 63]
        files = self._dao.get_last_files(5, "remote")
        self.assertEqual(len(files), 5)
        for i in range(5):
            self.assertEqual(files[i].id, ids[i])
        ids = [8, 11, 5]
        files = self._dao.get_last_files(5, "local")
        self.assertEqual(len(files), 3)
        for i in range(3):
            self.assertEqual(files[i].id, ids[i])

    def test_batch_folder_files(self):
        # Verify that the batch is ok
        ids = range(25, 47)
        index = 0
        state = self._dao.get_state_from_id(25)  # ids[index]
        while index < len(ids) - 1:
            index = index + 1
            state = self._dao.get_next_folder_file(state.remote_ref)
            self.assertEqual(state.id, ids[index])
        while index > 0:
            index = index - 1
            state = self._dao.get_previous_folder_file(state.remote_ref)
            self.assertEqual(state.id, ids[index])
        self.assertIsNone(self._dao.get_previous_folder_file(state.remote_ref))
        # Last file is 9
        state = self._dao.get_state_from_id(46)
        self.assertIsNone(self._dao.get_next_folder_file(state.remote_ref))

    def test_batch_upload_files(self):
        # Verify that the batch is ok
        ids = [58, 62, 61, 60, 63]
        index = 0
        state = self._dao.get_state_from_id(ids[index])
        while index < len(ids) - 1:
            index = index + 1
            state = self._dao.get_next_sync_file(state.remote_ref,
                                                 Engine.BATCH_MODE_UPLOAD)
            self.assertEqual(state.id, ids[index])
        while index > 0:
            index = index - 1
            state = self._dao.get_previous_sync_file(state.remote_ref,
                                                     Engine.BATCH_MODE_UPLOAD)
            self.assertEqual(state.id, ids[index])
        self.assertIsNone(
            self._dao.get_previous_sync_file(state.remote_ref,
                                             Engine.BATCH_MODE_UPLOAD))
        # Last file is 9
        state = self._dao.get_state_from_id(9)
        self.assertIsNone(
            self._dao.get_next_sync_file(state.remote_ref,
                                         Engine.BATCH_MODE_UPLOAD))

    def test_reinit_processors(self):
        state = self._dao.get_state_from_id(1)
        self.assertEqual(state.processor, 0)

    def test_acquire_processors(self):
        self.assertTrue(self._dao.acquire_processor(666, 2))
        # Cannot acquire processor if different processor
        self.assertFalse(self._dao.acquire_processor(777, 2))
        # Can re-acquire processor if same processor
        self.assertTrue(self._dao.acquire_processor(666, 2))
        self.assertTrue(self._dao.release_processor(666))
        # Check the auto-release
        self.assertTrue(self._dao.acquire_processor(666, 2))
        row = self._dao.get_state_from_id(2)
        self._dao.synchronize_state(row)
        self.assertFalse(self._dao.release_processor(666))

    def test_configuration(self):
        result = self._dao.get_config("empty", "DefaultValue")
        self.assertEqual(result, "DefaultValue")
        result = self._dao.get_config("remote_user", "DefaultValue")
        self.assertEqual(result, "Administrator")
        self._dao.update_config("empty", "notAnymore")
        result = self._dao.get_config("empty", "DefaultValue")
        self.assertNotEqual(result, "DefaultValue")
        self._dao.update_config("remote_user", "Test")
        result = self._dao.get_config("remote_user", "DefaultValue")
        self.assertEqual(result, "Test")
        self._dao.update_config("empty", None)
        result = self._dao.get_config("empty", "DefaultValue")
        self.assertEqual(result, "DefaultValue")
        result = self._dao.get_config("empty")
        self.assertEqual(result, None)

    def test_filters(self):
        # Contains by default /fakeFilter/Test_Parent and /fakeFilter/Retest
        self.assertEqual(len(self._dao.get_filters()), 2)
        self._dao.remove_filter(u"/fakeFilter/Retest")
        self.assertEqual(len(self._dao.get_filters()), 1)
        self._dao.add_filter(u"/fakeFilter")
        # Should delete the subchild filter
        self.assertEqual(len(self._dao.get_filters()), 1)
        self._dao.add_filter(u"/otherFilter")
        self.assertEqual(len(self._dao.get_filters()), 2)
def _create_dao(self):
    """Build an EngineDAO bound to this test's database file."""
    # Local import keeps module load cheap when the DAO is not needed.
    from nxdrive.engine.dao.sqlite import EngineDAO
    db_path = self._get_db_file()
    return EngineDAO(db_path)
def update_remote_state(row, *args, **kwargs): nonlocal marker EngineDAO.update_remote_state(self.engine_1._dao, row, *args, **kwargs) if not marker and row.local_name == "File.txt": self.engine_1.local.rename(row.local_path, "Renamed File.txt") marker = True