def check_resource(self, cnxt, resource_id, current_traversal, data,
                   is_update, adopt_stack_data):
    """Run the convergence check for a single graph node.

    The node represents either the update or the cleanup traversal
    (selected by is_update) of the resource with the given id.
    """
    node_inputs = dict(sync_point.deserialize_input_data(data))
    rsrc, rsrc_owning_stack, stack = check_resource.load_resource(
        cnxt, resource_id, node_inputs, is_update)

    # The resource may already be gone; nothing to check then.
    if rsrc is None:
        return

    # Bail out if a newer traversal has superseded the one this check
    # was scheduled for.
    if current_traversal != stack.current_traversal:
        LOG.debug('[%s] Traversal cancelled; stopping.', current_traversal)
        return

    queue = eventlet.queue.LightQueue()
    try:
        self.thread_group_mgr.add_msg_queue(stack.id, queue)
        checker = check_resource.CheckResource(self.engine_id,
                                               self._rpc_client,
                                               self.thread_group_mgr,
                                               queue)
        checker.check(cnxt, resource_id, current_traversal, node_inputs,
                      is_update, adopt_stack_data, rsrc, stack)
    finally:
        # Always detach the queue, even if the check raised.
        self.thread_group_mgr.remove_msg_queue(None, stack.id, queue)
def check_resource(self, cnxt, resource_id, current_traversal, data,
                   is_update, adopt_stack_data):
    """Process a node in the dependency graph.

    The node may be associated with either an update or a cleanup of
    its associated resource.

    :param cnxt: request context
    :param resource_id: id of the resource whose node is being checked
    :param current_traversal: traversal id this check was scheduled for
    :param data: serialized sync-point input data
    :param is_update: True for the update node, False for cleanup
    :param adopt_stack_data: data used when adopting an existing stack
    """
    resource_data = dict(sync_point.deserialize_input_data(data))
    rsrc, rsrc_owning_stack, stack = check_resource.load_resource(
        cnxt, resource_id, resource_data, is_update)
    if rsrc is None:
        # Resource no longer exists; nothing to do.
        return
    msg_queue = eventlet.queue.LightQueue()
    try:
        self.thread_group_mgr.add_msg_queue(stack.id, msg_queue)
        if current_traversal != stack.current_traversal:
            # A newer traversal supersedes this one: instead of running
            # the check, hand replaced resources over to the new
            # traversal.  (Fixed log-message typo "re-trigerring".)
            LOG.debug('[%s] Traversal cancelled; re-triggering.',
                      current_traversal)
            self._retrigger_replaced(is_update, rsrc, stack, msg_queue)
        else:
            cr = check_resource.CheckResource(self.engine_id,
                                              self._rpc_client,
                                              self.thread_group_mgr,
                                              msg_queue)
            cr.check(cnxt, resource_id, current_traversal, resource_data,
                     is_update, adopt_stack_data, rsrc, stack)
    finally:
        # Detach the queue even if the check or the retrigger raised.
        self.thread_group_mgr.remove_msg_queue(None, stack.id, msg_queue)
def check_resource(self, cnxt, resource_id, current_traversal, data,
                   is_update, adopt_stack_data):
    """Check one node of the dependency graph.

    Runs either the update or the cleanup check for the associated
    resource and, on success, kicks off the checks of its dependents.
    """
    input_data = dict(sync_point.deserialize_input_data(data))
    rsrc, rsrc_owning_stack, stack = self._load_resource(
        cnxt, resource_id, input_data, is_update)

    if rsrc is None:
        return

    if current_traversal != stack.current_traversal:
        LOG.debug("[%s] Traversal cancelled; stopping.", current_traversal)
        return

    if stack.has_timed_out():
        self._handle_stack_timeout(cnxt, stack)
        return

    tmpl = stack.t
    stack.adopt_stack_data = adopt_stack_data

    # A resource that has already been replaced under another template
    # needs no further work here.
    if (is_update and rsrc.replaced_by is not None
            and rsrc.current_template_id != tmpl.id):
        return

    done = self._do_check_resource(cnxt, current_traversal, tmpl,
                                   input_data, is_update, rsrc, stack,
                                   adopt_stack_data)
    if done:
        # Trigger the checks of the next set of resources in the graph.
        self._initiate_propagate_resource(cnxt, resource_id,
                                          current_traversal, is_update,
                                          rsrc, stack)
def _load_resource(self, cnxt, resource_id, data, is_update):
    """Load a resource and its stack, seeding the cache from *data*.

    Returns (None, None) when the resource or stack no longer exists.
    """
    adopt_data = data.get('adopt_stack_data')
    deserialized = dict(sync_point.deserialize_input_data(data))

    cache_data = {}
    for entry in deserialized.values():
        if entry is not None:
            cache_data[entry.get('name')] = entry
    cache_data['adopt_stack_data'] = adopt_data

    try:
        rsrc, stack = resource.Resource.load(cnxt, resource_id,
                                             is_update, cache_data)
    except (exception.ResourceNotFound, exception.NotFound):
        # Already deleted elsewhere; signal absence to the caller.
        return None, None
    return rsrc, stack
def _load_resource(self, cnxt, resource_id, data, is_update):
    """Fetch resource and stack, priming the cache from the input data."""
    adopt_data = data.get('adopt_stack_data')
    inputs = dict(sync_point.deserialize_input_data(data))
    cache_data = {item.get('name'): item
                  for item in inputs.values()
                  if item is not None}
    cache_data['adopt_stack_data'] = adopt_data

    rsrc = stack = None
    try:
        rsrc, stack = resource.Resource.load(cnxt, resource_id,
                                             is_update, cache_data)
    except (exception.ResourceNotFound, exception.NotFound):
        pass  # deliberately ignored; (None, None) means "gone"
    return rsrc, stack
def check_resource(self, cnxt, resource_id, current_traversal, data,
                   is_update, adopt_stack_data):
    """Handle the check of a single dependency-graph node.

    A node corresponds either to the update or to the cleanup of its
    resource; on success the check is propagated through the graph.
    """
    rsrc_data = dict(sync_point.deserialize_input_data(data))
    rsrc, rsrc_owning_stack, stack = self._load_resource(
        cnxt, resource_id, rsrc_data, is_update)
    if rsrc is None:
        return

    if current_traversal != stack.current_traversal:
        # Superseded by a newer traversal.
        LOG.debug('[%s] Traversal cancelled; stopping.', current_traversal)
        return

    if stack.has_timed_out():
        self._handle_stack_timeout(cnxt, stack)
        return

    tmpl = stack.t
    stack.adopt_stack_data = adopt_stack_data
    stack.thread_group_mgr = self.thread_group_mgr

    if is_update:
        already_replaced = (rsrc.replaced_by is not None
                            and rsrc.current_template_id != tmpl.id)
        if already_replaced:
            return

    if self._do_check_resource(cnxt, current_traversal, tmpl, rsrc_data,
                               is_update, rsrc, stack, adopt_stack_data):
        # Check succeeded: schedule the dependent graph nodes.
        self._initiate_propagate_resource(cnxt, resource_id,
                                          current_traversal, is_update,
                                          rsrc, stack)
def test_sync_non_waiting(self):
    """sync() with all predecessors satisfied fires the callback at once."""
    ctx = utils.dummy_context()
    stack = tools.get_stack('test_stack', utils.dummy_context(),
                            template=tools.string_template_five,
                            convergence=True)
    stack.converge_stack(stack.t, action=stack.CREATE)
    rsrc = stack['A']
    graph = stack.convergence_dependencies.graph()
    sender = (3, True)
    callback = mock.Mock()

    sync_point.sync(ctx, rsrc.id, stack.current_traversal, True,
                    callback, set(graph[(rsrc.id, True)]),
                    {sender: None})

    updated = sync_point.get(ctx, rsrc.id, stack.current_traversal, True)
    self.assertEqual(
        {sender: None},
        sync_point.deserialize_input_data(updated.input_data))
    self.assertTrue(callback.called)
def check_resource(self, cnxt, resource_id, current_traversal, data,
                   is_update):
    """Dispatch the check of one dependency-graph node.

    The node is either the update or the cleanup of its resource.
    """
    inputs = dict(sync_point.deserialize_input_data(data))
    rsrc, rsrc_owning_stack, stack = check_resource.load_resource(
        cnxt, resource_id, inputs, is_update)

    if rsrc is None:
        # Nothing left to check.
        return
    if current_traversal != stack.current_traversal:
        LOG.debug('[%s] Traversal cancelled; stopping.', current_traversal)
        return

    worker = check_resource.CheckResource(self.engine_id,
                                          self._rpc_client,
                                          self.thread_group_mgr)
    worker.check(cnxt, resource_id, current_traversal, inputs,
                 is_update, adopt_stack_data, rsrc, stack)
def check_resource(self, cnxt, resource_id, current_traversal, data,
                   is_update, adopt_stack_data, converge=False):
    """Process a node in the dependency graph.

    The node may be associated with either an update or a cleanup of
    its associated resource.

    :param converge: whether the resource should be flagged for forced
        convergence (stored on the resource before checking)
    """
    in_data = sync_point.deserialize_input_data(data)
    resource_data = node_data.load_resources_data(in_data if is_update
                                                  else {})
    rsrc, stk_defn, stack = check_resource.load_resource(
        cnxt, resource_id, resource_data, current_traversal, is_update)

    if rsrc is None:
        return

    rsrc.converge = converge

    msg_queue = eventlet.queue.LightQueue()
    try:
        self.thread_group_mgr.add_msg_queue(stack.id, msg_queue)
        cr = check_resource.CheckResource(self.engine_id,
                                          self._rpc_client,
                                          self.thread_group_mgr,
                                          msg_queue, in_data)
        if current_traversal != stack.current_traversal:
            # A newer traversal supersedes this one; hand replaced
            # resources over to it.  (Fixed typo "re-trigerring" in the
            # log message.)
            LOG.debug('[%s] Traversal cancelled; re-triggering.',
                      current_traversal)
            self._retrigger_replaced(is_update, rsrc, stack, cr)
        else:
            cr.check(cnxt, resource_id, current_traversal, resource_data,
                     is_update, adopt_stack_data, rsrc, stack)
    finally:
        # Detach the queue even on failure.
        self.thread_group_mgr.remove_msg_queue(None, stack.id, msg_queue)
def check_resource(self, cnxt, resource_id, current_traversal, data,
                   is_update):
    """Process a node in the dependency graph.

    The node may be associated with either an update or a cleanup of
    its associated resource.  On success, the check is propagated to
    the dependent nodes and stack completion is evaluated.
    """
    adopt_data = data.get('adopt_stack_data')
    data = dict(sync_point.deserialize_input_data(data))
    try:
        cache_data = {in_data.get('name'): in_data
                      for in_data in data.values()
                      if in_data is not None}
        cache_data['adopt_stack_data'] = adopt_data
        rsrc, stack = resource.Resource.load(cnxt, resource_id,
                                             is_update, cache_data)
    except (exception.ResourceNotFound, exception.NotFound):
        # Resource or stack already gone; nothing to check.
        return
    tmpl = stack.t

    if current_traversal != rsrc.stack.current_traversal:
        LOG.debug('[%s] Traversal cancelled; stopping.', current_traversal)
        return

    current_deps = ([tuple(i), (tuple(j) if j is not None else None)]
                    for i, j in rsrc.stack.current_deps['edges'])
    deps = dependencies.Dependencies(edges=current_deps)
    graph = deps.graph()

    if is_update:
        if (rsrc.replaced_by is not None and
                rsrc.current_template_id != tmpl.id):
            # Already replaced under a different template; skip.
            return

        try:
            check_resource_update(rsrc, tmpl.id, data, self.engine_id)
        except resource.UpdateReplace:
            new_res_id = rsrc.make_replacement(tmpl.id)
            LOG.info("Replacing resource with new id %s", new_res_id)
            data = sync_point.serialize_input_data(data)
            self._rpc_client.check_resource(cnxt, new_res_id,
                                            current_traversal, data,
                                            is_update)
            return
        except resource.UpdateInProgress:
            if self._try_steal_engine_lock(cnxt, resource_id):
                # BUG FIX: re-serialize before sending over RPC, as the
                # UpdateReplace path above does; check_resource()
                # deserializes the payload on receipt.
                rpc_data = sync_point.serialize_input_data(data)
                self._rpc_client.check_resource(cnxt, resource_id,
                                                current_traversal,
                                                rpc_data, is_update)
            return
        except exception.ResourceFailure as ex:
            reason = 'Resource %s failed: %s' % (stack.action,
                                                 six.text_type(ex))
            self._handle_resource_failure(
                cnxt, stack.id, current_traversal, reason)
            return

        input_data = construct_input_data(rsrc)
    else:
        # BUG FIX: input_data was left unbound on the cleanup path,
        # risking an UnboundLocalError in the propagation loop below.
        input_data = None
        try:
            check_resource_cleanup(rsrc, tmpl.id, data, self.engine_id)
        except resource.UpdateInProgress:
            if self._try_steal_engine_lock(cnxt, resource_id):
                # BUG FIX: re-serialize before the RPC resend (see above).
                rpc_data = sync_point.serialize_input_data(data)
                self._rpc_client.check_resource(cnxt, resource_id,
                                                current_traversal,
                                                rpc_data, is_update)
            return
        except exception.ResourceFailure as ex:
            reason = 'Resource %s failed: %s' % (stack.action,
                                                 six.text_type(ex))
            self._handle_resource_failure(
                cnxt, stack.id, current_traversal, reason)
            return

    graph_key = (resource_id, is_update)
    if graph_key not in graph and rsrc.replaces is not None:
        # If we are a replacement, impersonate the replaced resource for
        # the purposes of calculating whether subsequent resources are
        # ready, since everybody has to work from the same version of the
        # graph. Our real resource ID is sent in the input_data, so the
        # dependencies will get updated to point to this resource in time
        # for the next traversal.
        graph_key = (rsrc.replaces, is_update)

    try:
        for req, fwd in deps.required_by(graph_key):
            propagate_check_resource(
                cnxt, self._rpc_client, req, current_traversal,
                set(graph[(req, fwd)]), graph_key,
                input_data if fwd else None, fwd)

        check_stack_complete(cnxt, rsrc.stack, current_traversal,
                             resource_id, deps, is_update)
    except sync_point.SyncPointNotFound:
        # Reload the stack to determine the current traversal, and check
        # the SyncPoint for the current node to determine if it is ready.
        # If it is, then retrigger the current node with the appropriate
        # data for the latest traversal.
        stack = parser.Stack.load(cnxt, stack_id=rsrc.stack.id)
        # BUG FIX: compare against the freshly loaded stack.  Comparing
        # against the cached rsrc.stack was always true here (the early
        # return above guarantees equality), which made the retrigger
        # logic below unreachable.
        if current_traversal == stack.current_traversal:
            LOG.debug('[%s] Traversal sync point missing.',
                      current_traversal)
            return

        current_traversal = stack.current_traversal
        current_deps = ([tuple(i), (tuple(j) if j is not None else None)]
                        for i, j in stack.current_deps['edges'])
        deps = dependencies.Dependencies(edges=current_deps)
        key = sync_point.make_key(resource_id, current_traversal,
                                  is_update)
        predecessors = deps.graph()[key]

        def do_check(target_key, data):
            # BUG FIX: check_resource() requires the request context and
            # the is_update flag; the original call omitted both, which
            # would raise TypeError whenever the callback fired.
            self.check_resource(cnxt, resource_id, current_traversal,
                                data, is_update)

        try:
            sync_point.sync(cnxt, resource_id, current_traversal,
                            is_update, do_check, predecessors,
                            {key: None})
        except sync_point.sync_points.NotFound:
            pass
def check_resource(self, cnxt, resource_id, current_traversal, data,
                   is_update):
    """Process a node in the dependency graph.

    The node may be associated with either an update or a cleanup of
    its associated resource.  On success the check is propagated to
    dependent nodes and stack completion is evaluated.
    """
    data = dict(sync_point.deserialize_input_data(data))
    try:
        cache_data = {in_data.get('name'): in_data
                      for in_data in data.values()
                      if in_data is not None}
        rsrc, stack = resource.Resource.load(cnxt, resource_id,
                                             cache_data)
    except (exception.ResourceNotFound, exception.NotFound):
        # Resource or stack already gone; nothing to check.
        return
    tmpl = stack.t

    if current_traversal != rsrc.stack.current_traversal:
        LOG.debug('[%s] Traversal cancelled; stopping.', current_traversal)
        return

    current_deps = ([tuple(i), (tuple(j) if j is not None else None)]
                    for i, j in rsrc.stack.current_deps['edges'])
    deps = dependencies.Dependencies(edges=current_deps)
    graph = deps.graph()

    if is_update:
        if (rsrc.replaced_by is not None and
                rsrc.current_template_id != tmpl.id):
            # Already replaced under a different template; skip.
            return

        try:
            check_resource_update(rsrc, tmpl.id, data)
        except resource.UpdateReplace:
            new_res_id = rsrc.make_replacement()
            # NOTE(review): data is resent here without re-serialization,
            # unlike later revisions of this method -- confirm against
            # the RPC contract for check_resource.
            self._rpc_client.check_resource(cnxt, new_res_id,
                                            current_traversal, data,
                                            is_update)
            return
        except resource.UpdateInProgress:
            return

        input_data = construct_input_data(rsrc)
    else:
        # BUG FIX: input_data was left unbound on the cleanup path,
        # risking an UnboundLocalError in the propagation loop below
        # if a forward edge is encountered.
        input_data = None
        try:
            check_resource_cleanup(rsrc, tmpl.id, data)
        except resource.UpdateInProgress:
            return

    graph_key = (rsrc.id, is_update)
    if graph_key not in graph and rsrc.replaces is not None:
        # If we are a replacement, impersonate the replaced resource for
        # the purposes of calculating whether subsequent resources are
        # ready, since everybody has to work from the same version of the
        # graph. Our real resource ID is sent in the input_data, so the
        # dependencies will get updated to point to this resource in time
        # for the next traversal.
        graph_key = (rsrc.replaces, is_update)

    try:
        for req, fwd in deps.required_by(graph_key):
            propagate_check_resource(
                cnxt, self._rpc_client, req, current_traversal,
                set(graph[(req, fwd)]), graph_key,
                input_data if fwd else None, fwd)

        check_stack_complete(cnxt, rsrc.stack, current_traversal,
                             rsrc.id, deps, is_update)
    except sync_point.SyncPointNotFound:
        # NOTE(sirushtim): Implemented by spec
        # convergence-concurrent-workflow
        pass