def sync_engine_without_params(request):
    """Init test without parametrisation."""
    # set free space, so everything is synced to csp 1
    csps = [
        MagicMock(spec=['free_space', 'storage_id'],
                  free_space=100 * MBYTE,
                  storage_id=CSP_1.storage_id),
        MagicMock(spec=['free_space', 'storage_id'],
                  free_space=10 * MBYTE,
                  storage_id=CSP_2_ID)
    ]
    tasklist = []
    # the task sink must be a callable, so pass the list's append method
    actor = SyncEngine.start(csps=csps, task_sink=tasklist.append)
    sync_engine = actor.proxy()

    # shut down the sync engine after the test
    request.addfinalizer(sync_engine.stop)

    return_type = namedtuple('StorageEngineWithoutFiles',
                             ['sync_engine', 'csps', 'task_list'])
    return return_type(sync_engine=sync_engine, csps=csps, task_list=tasklist)
def sync_engine_without_files(mocker, request):
    """Start a sync engine without any files and yield it together with its metrics."""
    mocker.patch('cc.config.write_hidden_file')
    csps = [
        StorageMetrics(storage_id=CSP_1.storage_id,
                       free_space=100 * MBYTE,
                       display_name=CSP_1.display_name)
    ]
    tasklist = []
    actor = SyncEngine.start(csps=csps, task_sink=tasklist.append)
    sync_engine = actor.proxy()
    actor._actor.initialized = True

    test_files = request.param
    return_type = namedtuple(
        'StorageEngineWithoutFiles',
        ['test_files', 'sync_engine', 'csps', 'task_list'])

    # XXX: is this necessary?
    # sync_engine.root_node.get().add_child(name=CSP_1.name,
    #                                       props=dict(is_dir=True,
    #                                                  size=0,
    #                                                  version_id='is_dir'))

    yield return_type(test_files=test_files,
                      sync_engine=sync_engine,
                      csps=csps,
                      task_list=tasklist)

    sync_engine.stop()
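# Illustrative sketch (assumption, not part of the original fixtures): because
# sync_engine_without_files reads request.param, tests have to parametrise it
# indirectly. The _ExampleFile tuple and the test name below are hypothetical
# stand-ins for whatever file descriptors the real tests pass in.
_ExampleFile = namedtuple('_ExampleFile', ['path', 'version_id', 'is_dir'])


@pytest.mark.parametrize(
    'sync_engine_without_files',
    [[_ExampleFile(path=['hello.txt'], version_id=1, is_dir=False)]],
    indirect=True)
def test_example_no_tasks_on_empty_engine(sync_engine_without_files):
    # Nothing has been created yet, so the engine should not have emitted any tasks.
    assert sync_engine_without_files.task_list == []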
def using(cls, client_config, storage_config, task_queue):
    """Build a SynchronizationLink from the given configuration and the global task queue.

    :param client_config: the client configuration. See :class:`cc.configuration.Config`.
    :type client_config: cc.configuration.Config
    :param storage_config: configuration dictionary for the SP that should be set up
        with this link.
    :param task_queue: the task queue that is kept by the bademeister.
    :type task_queue: cc.synchronization.models.TaskQueue
    :return: a ready-to-use synchronization link
    """
    # Load the SyncEngine state from disk if present.
    # We most likely want one sync state per pair, so the storage's unique id is
    # appended to get a unique location for the state.
    sync_state_filename = "sync_state_{}".format(storage_config['id'])
    sync_state_file = os.path.join(client_config.config_dir, sync_state_filename)
    sync_state = State.fromfile(sync_state_file)

    # Prepare metrics
    metrics = StorageMetrics(storage_id=storage_config['id'],
                             free_space=0,
                             display_name=storage_config['display_name'])

    # Set up the sync engine
    sync_actor = SyncEngine.start(storage_metrics=metrics,
                                  task_sink=task_queue.put_task,
                                  model=sync_state)
    sync_engine = sync_actor.proxy()

    # find the registered storage class whose name matches the configured type
    csps = [c for c in jars.registered_storages
            if c.storage_name == storage_config['type']]
    storage_cls = csps[0]

    remote = instantiate_storage(
        storage_cls,
        storage_id=storage_config.get('id'),
        config=client_config,
        # TODO FIXME: Properly build the selected_sync_dirs (children, path).
        selected_sync_dirs=storage_config.get('selected_sync_directories'),
        sync_engine=sync_engine)

    local_sync_root = os.path.join(client_config.sync_root,
                                   storage_config['display_name'])
    logger.info("Local Root for '%s' is '%s'.", storage_config['id'], local_sync_root)
    if not os.path.exists(local_sync_root):
        logger.error("'%s' does not exist!", local_sync_root)

    local = prepare_local_filesystem(
        local_sync_root=local_sync_root,
        sync_engine=sync_engine,
        public_key_getter=partial(get_public_key_pem_by_subject, client_config),
        private_key_getter=partial(get_private_key_pem_by_subject, client_config))

    # TODO: Hack! This is needed by the EncryptionWrapper around the local storage.
    # It still needs refactoring, but it is an improvement over the previous global config.
    local.client_config = client_config

    link = SynchronizationLink(local=local,
                               remote=remote,
                               actor=sync_actor,
                               engine=sync_engine,
                               state=sync_state,
                               metrics=metrics,
                               task_queue=task_queue,
                               config_dir=client_config.config_dir)
    logger.info("Instantiated Link '%s'", link.link_id)
    return link
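# Illustrative sketch (assumption, not part of this module): building a link from
# configuration. The storage_config keys mirror the ones read by using() above; the
# concrete values and the assumption that using() is exposed as the classmethod
# SynchronizationLink.using are for illustration only.
def _example_build_link(client_config, task_queue):
    storage_config = {
        'id': 'storage_1',          # hypothetical storage id
        'type': 'dropbox',          # must match a registered storage's storage_name
        'display_name': 'Dropbox',
        'selected_sync_directories': [],
    }
    # The returned link feeds its tasks into task_queue and identifies itself
    # via link.link_id.
    return SynchronizationLink.using(client_config, storage_config, task_queue)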
def test_while_csp_uldl_success():
    """The up-/download success handler should clear the running flag and track version equivalents on the node."""
    # pylint: disable=protected-access
    node = MagicMock(
        props={
            syncfsm.STORAGE: {
                FILESYSTEM_ID: {SIZE: MBYTE},
                CSP_1.storage_id: {syncfsm.SYNC_TASK_RUNNING: True},
                CSP_2_ID: {syncfsm.SYNC_TASK_RUNNING: True}
            }
        })
    task = UploadSyncTask(path=['abc'],
                          target_storage_id=CSP_1.storage_id,
                          source_version_id=123)
    task.state = SyncTask.SUCCESSFUL
    sync_engine = SyncEngine([], None)
    task.target_version_id = 321
    fsm = MagicMock()

    # acking the successful upload to CSP 1 clears its running flag and records
    # the (storage, version) pairs as the 'new' equivalents
    sync_engine._ack_updownload_task(fsm, node, task)
    assert not node.props[syncfsm.STORAGE][CSP_1.storage_id][syncfsm.SYNC_TASK_RUNNING]
    assert node.props[syncfsm.STORAGE][CSP_2_ID][syncfsm.SYNC_TASK_RUNNING]
    assert (syncfsm.FILESYSTEM_ID, 123) in node.props['equivalents']['new'].items()
    assert (CSP_1.storage_id, 321) in node.props['equivalents']['new'].items()
    assert len(node.props['equivalents']['old']) == 0

    task2 = UploadSyncTask(path=['abc'],
                           target_storage_id=CSP_2_ID,
                           source_version_id=123)
    task2.state = SyncTask.SUCCESSFUL
    task2.target_version_id = 456

    # acking the upload to CSP 2 for the same source version extends the 'new' equivalents
    sync_engine._ack_updownload_task(fsm, node, task2)
    assert not node.props[syncfsm.STORAGE][CSP_1.storage_id][syncfsm.SYNC_TASK_RUNNING]
    assert not node.props[syncfsm.STORAGE][CSP_2_ID][syncfsm.SYNC_TASK_RUNNING]
    assert (syncfsm.FILESYSTEM_ID, 123) in node.props['equivalents']['new'].items()
    assert (CSP_1.storage_id, 321) in node.props['equivalents']['new'].items()
    assert (CSP_2_ID, 456) in node.props['equivalents']['new'].items()
    assert len(node.props['equivalents']['old']) == 0

    task3 = UploadSyncTask(path=['abc'],
                           target_storage_id=CSP_1.storage_id,
                           source_version_id=234)
    task3.state = SyncTask.SUCCESSFUL
    task3.target_version_id = 345

    # a newer source version moves the previous equivalents to 'old' and starts a new set
    sync_engine._ack_updownload_task(fsm, node, task3)
    assert (syncfsm.FILESYSTEM_ID, 123) in node.props['equivalents']['old'].items()
    assert (CSP_1.storage_id, 321) in node.props['equivalents']['old'].items()
    assert (CSP_2_ID, 456) in node.props['equivalents']['old'].items()
    assert (syncfsm.FILESYSTEM_ID, 234) in node.props['equivalents']['new'].items()
    assert (CSP_1.storage_id, 345) in node.props['equivalents']['new'].items()
    assert len(node.props['equivalents']['new'].items()) == 2
    assert len(node.props['equivalents']['old'].items()) == 3
def sync_engine(storage_metrics):
    """Return a sync engine instance."""
    tasks = []
    return SyncEngine(storage_metrics=storage_metrics, task_sink=tasks.append)
class SyncEngineTester:
    """Helper that drives a SyncEngine directly and records the tasks it emits."""

    def __init__(self):
        self.task_list = []
        self.storage_metrics = StorageMetrics(CSP_1.storage_id, 100 * MBYTE)
        self.sync_engine = SyncEngine(self.storage_metrics, self.task_sink)
        self.link = Mock()
        self.link.link_id = 'mock_link_id'

    def task_sink(self, task):
        task.link = self.link
        self.task_list.append(task)

    def assert_expected_tasks(self, expected_tasks):
        """Assert that the expected tasks are in the task list, regardless of order."""
        for task in expected_tasks:
            task.link = self.link
        task_list_dict = {t: t for t in self.task_list}
        # use a set here on the left side to get better formatted diffs
        print(self.task_list)
        print(expected_tasks)
        assert set(task_list_dict.keys()) == set(expected_tasks)

        # asserting set equality is not enough; all attributes are compared as well
        for expected_task in expected_tasks:
            task_from_list = task_list_dict.pop(expected_task)
            print(task_from_list.__dict__)
            print(expected_task.__dict__)
            assert task_from_list.__dict__ == expected_task.__dict__

    def ack_fetch_tree_task(self, storage_id, model, metrics=None):
        """Ack the pending FetchFileTreeTask for `storage_id` with the given tree model."""
        if metrics is None:
            metrics = StorageMetrics(storage_id=CSP_1.storage_id,
                                     free_space=10,
                                     total_space=100)
        for task in self.task_list:
            if isinstance(task, FetchFileTreeTask):
                if storage_id == task.storage_id:
                    task.file_tree = model
                    model.props['metrics'] = metrics
                    task.state = SyncTask.SUCCESSFUL
                    self.sync_engine.ack_task(task)
                    self.task_list.remove(task)
                    break
        else:
            raise AssertionError('No task to ack to')

    def init_with_files(self, files, equivalent=True, file_size=MBYTE):
        """Init the sync engine with a set of files.

        :param files: a list of paths (lists) to populate it with
        """
        tree = Node(name=None)
        for file in files:
            tree.get_node_safe(file).props.update({VERSION_ID: 1, SIZE: file_size})

        for node in tree:
            if node.parent is not None and VERSION_ID not in node.props:
                # nodes without a version id were auto-generated by get_node_safe
                # and are implicitly created directories
                node.props[IS_DIR] = True
                node.props[VERSION_ID] = IS_DIR
            else:
                node.props[IS_DIR] = False

        self.sync_engine.merge_storage_to_sync_model(tree, FILESYSTEM_ID)
        self.sync_engine.merge_storage_to_sync_model(tree, CSP_1.storage_id)

        if equivalent:
            for node in tree:
                if node.parent is None:
                    continue
                self.sync_engine.root_node.get_node(node.path).props['equivalents'] = \
                    {'new': {FILESYSTEM_ID: node.props[VERSION_ID],
                             CSP_1.storage_id: node.props[VERSION_ID]}}

        self.sync_engine._sync_state()
        self.sync_engine.state = SyncEngineState.RUNNING
        return tree

    def ack_task(self, task, state=SyncTask.SUCCESSFUL):
        """Ack a single task back to the sync engine with the given state."""
        self.task_list.remove(task)
        logging.debug('acking task:%s', task)
        task.state = state
        self.sync_engine.ack_task(task)

    def ack_all_tasks(self, state=SyncTask.SUCCESSFUL, secure_hash_fun=lambda x: x * 2):
        """Pass all tasks to the acker.

        - Set the state of each task to `state`.
        - Apply `secure_hash_fun` to each source version id to derive the target version id.
        - Pass the task to the acker.
        """
        # work with a copy of the task list so newly created tasks are not touched
        for task in list(self.task_list):
            self.task_list.remove(task)
            logging.debug('acking task:%s', task)
            task.state = state
            if hasattr(task, 'source_version_id'):
                if task.source_version_id == IS_DIR:
                    task.target_version_id = task.source_version_id
                else:
                    task.target_version_id = secure_hash_fun(task.source_version_id)
            self.sync_engine.ack_task(task)
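# Illustrative sketch (assumption, not one of the original tests): typical use of
# SyncEngineTester. The file names, version ids and event props are hypothetical;
# the expected UploadSyncTask mirrors the upload pattern used in the fixtures below.
def _example_sync_engine_tester_usage():
    tester = SyncEngineTester()
    # start in RUNNING state with one file already synced on both storages
    tester.init_with_files([['existing.txt']])

    # report a newly created local file to the engine ...
    tester.sync_engine.storage_create(
        FILESYSTEM_ID, ['new_file.txt'],
        dict(modified_date=dt.datetime.now(),
             is_dir=False,
             storage_id=FILESYSTEM_ID,
             size=MBYTE,
             version_id=1))

    # ... which should be mirrored to csp 1 via an upload task
    tester.assert_expected_tasks(
        [UploadSyncTask(path=['new_file.txt'],
                        source_version_id=1,
                        target_storage_id=CSP_1.storage_id)])
    tester.ack_all_tasks()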
def sync_engine_with_files(request, storage_metrics):
    """Create a situation where the parametrised test files exist in the local file storage and on csp 1.

    Every future task should be executed on csp 1.
    """
    csps = storage_metrics
    tasklist = []
    actor = SyncEngine.start(csps=csps, task_sink=tasklist.append)
    actor._actor.initialized = True
    sync_engine = actor.proxy()

    sync_engine.storage_create(
        normalize_path_element(CSP_1_DISPLAY_NAME), [CSP_1_DISPLAY_NAME],
        dict(is_dir=True,
             modified_date=dt.datetime.now(),
             size=0,
             version_id='is_dir')).get()
    tasklist.clear()

    test_files = deepcopy(request.param)
    expected_task_list = []
    future = None

    # create the folder structure and the expected tasks
    for test_file in test_files:
        current_path = []
        for path_elm in test_file.path[0:-1]:
            # assemble the path while iterating it
            current_path.append(path_elm)

            # create the parent directory event
            if len(current_path) > 1:
                sync_engine.storage_create(
                    FILESYSTEM_ID, current_path.copy(),
                    dict(is_dir=True,
                         modified_date=dt.datetime.now(),
                         storage_id=FILESYSTEM_ID,
                         size=0,
                         version_id='is_dir')).get()

                # add the expected SyncTask
                expected_task_list.append(
                    CreateDirSyncTask(path=normalize_path(current_path.copy()),
                                      target_storage_id=csps[0].storage_id,
                                      source_storage_id=FILESYSTEM_ID))

        # create the file
        future = sync_engine.storage_create(
            FILESYSTEM_ID, normalize_path(test_file.path),
            dict(modified_date=dt.datetime.now(),
                 is_dir=test_file.is_dir,
                 storage_id=FILESYSTEM_ID,
                 size=MBYTE,
                 version_id=test_file.version_id))

        # the expected sync task depends on the type of the item
        if test_file.is_dir:
            expected_task_list.append(
                CreateDirSyncTask(path=normalize_path(test_file.path),
                                  target_storage_id=csps[0].storage_id,
                                  source_storage_id=FILESYSTEM_ID))
        else:
            expected_task_list.append(
                UploadSyncTask(path=normalize_path(test_file.path),
                               source_version_id=test_file.version_id,
                               target_storage_id=csps[0].storage_id))
    future.get()

    # check the state of the engine for each sub path
    # TODO: assert that the sp folder does not have an fsm
    for test_file in test_files:
        # the sp folder does not have an fsm
        sub_paths = get_sub_paths(test_file.path)
        for sub_path in sub_paths[1:]:
            props = sync_engine.query(sub_path).get()
            assert props[SE_FSM].current == S_UPLOADING

    # assert that all expected tasks are in the tasklist
    assert set(expected_task_list) == set(tasklist)

    ack_all_tasks(tasklist, sync_engine.ack_task)

    # XXX: does the same thing as above. refactor?
    # # check state of engine for path
    # for test_file in test_files:
    #     sub_paths = get_sub_paths(test_file.path)
    #     for sub_path in sub_paths:
    #         props = sync_engine.query(sub_path).get()
    #         assert props[SE_FSM].current == S_UPLOADING

    csp_id = csps[0].storage_id
    for test_file in test_files:
        current_path = []
        for pathelm in test_file.path[:-1]:
            # assemble the path while iterating it
            current_path.append(pathelm)

            # create the directory event on the csp
            sync_engine.storage_create(storage_id=csp_id,
                                       path=current_path,
                                       event_props=dict(
                                           modified_date=dt.datetime.now(),
                                           is_dir=True,
                                           storage_id=csp_id,
                                           version_id='is_dir',
                                           size=0)).get()

        if test_file.version_id == 'is_dir':
            vid = 'is_dir'
        else:
            # 'very secure' hashing: the csp version id is simply the doubled source id
            vid = test_file.version_id * 2

        sync_engine.storage_create(
            storage_id=csp_id,
            path=test_file.path,
            event_props=dict(modified_date=dt.datetime.now(),
                             is_dir=test_file.is_dir,
                             storage_id=csp_id,
                             version_id=vid,
                             size=MBYTE)).get()

        # add the csp to the csp list of the test file
        test_file.csps.append(csps[0].storage_id)

    # check the state of the engine for each path
    for test_file in test_files:
        sub_paths = get_sub_paths(test_file.path)
        # sub_paths.reverse()
        for sub_path in sub_paths[1:]:
            props = sync_engine.query(sub_path).get()
            assert props[SE_FSM].current == S_SYNCED, \
                'path ' + str(sub_path) + ' not in sync state'
            assert MODIFIED_DATE in props[STORAGE][FILESYSTEM_ID]
            assert MODIFIED_DATE in props[STORAGE][csp_id]

    assert len(tasklist) == 0

    return_type = namedtuple(
        'StorageEngineWithFiles',
        ['test_files', 'sync_engine', 'csps', 'task_list'])
    yield return_type(test_files=test_files,
                      csps=csps,
                      task_list=tasklist,
                      sync_engine=sync_engine)
    sync_engine.stop()
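# Illustrative sketch (assumption, not one of the original tests): how a test would
# consume the sync_engine_with_files fixture via indirect parametrisation. The
# _ExampleSyncFile class and the concrete path/version id are hypothetical stand-ins
# for the real test-file descriptors used in this module.
class _ExampleSyncFile:
    def __init__(self, path, version_id, is_dir=False):
        self.path = path
        self.version_id = version_id
        self.is_dir = is_dir
        self.csps = []


@pytest.mark.parametrize('sync_engine_with_files',
                         [[_ExampleSyncFile(['readme.txt'], version_id=1)]],
                         indirect=True)
def test_example_files_are_synced(sync_engine_with_files):
    # the fixture leaves every requested file synced on both storages
    # and the task list drained
    for test_file in sync_engine_with_files.test_files:
        props = sync_engine_with_files.sync_engine.query(test_file.path).get()
        assert props[SE_FSM].current == S_SYNCED
    assert sync_engine_with_files.task_list == []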