def with_compat_node(func):
    """
    This decorator transforms node properties of OpenStack nodes written
    for plugin version 2.x so that they are compatible with the node types
    supported by version 3.x.
    :param func: The decorated function
    :return: Wrapped function
    """
    def wrapper(**kwargs):
        ctx = kwargs.get('ctx', CloudifyContext)
        # Resolve the actual context that needs to run the operation;
        # the context could belong to a relationship context or to an
        # actual node context
        ctx_node = resolve_ctx(ctx)
        # Check to see if we need to do properties transformation or not
        kwargs_config = {}
        if is_compat_node(ctx_node):
            compat = Compat(context=ctx_node, **kwargs)
            kwargs_config = compat.transform()

        if not kwargs_config:
            kwargs_config = kwargs
        func(**kwargs_config)
        update_runtime_properties_for_node_v2(ctx_node, kwargs_config)

    return operation(func=wrapper, resumable=True)
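# Usage sketch (an assumption, not part of the module): a 3.x-style
# OpenStack operation decorated with with_compat_node. The operation name
# `create` is illustrative; the decorator resolves the context, transforms
# any 2.x-style inputs via Compat, and only then calls the function.
@with_compat_node
def create(**kwargs):
    # kwargs arrive here already translated to the 3.x layout (or passed
    # through unchanged when no transformation was needed).
    pass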
def aws_relationship(class_decl=None, resource_type='AWS Resource'):
    '''AWS resource decorator'''
    def wrapper_outer(function):
        '''Outer function'''
        def wrapper_inner(**kwargs):
            '''Inner, worker function'''
            ctx = kwargs['ctx']
            # Add new operation arguments
            kwargs['resource_type'] = resource_type
            kwargs['iface'] = class_decl(
                ctx.source.node,
                logger=ctx.logger,
                resource_id=utils.get_resource_id(
                    node=ctx.source.node,
                    instance=ctx.source.instance,
                    raise_on_missing=True)) if class_decl else None
            kwargs['resource_config'] = kwargs.get('resource_config') or dict()
            # Check if using external
            if ctx.source.node.properties.get('use_external_resource', False):
                resource_id = utils.get_resource_id(
                    node=ctx.source.node, instance=ctx.source.instance)
                ctx.logger.info('%s ID# "%s" is user-provided.'
                                % (resource_type, resource_id))
                force_op = kwargs.get('force_operation', False)
                old_target = ctx.target.node.properties.get(
                    'use_external_resource', False)
                if not force_op and not old_target:
                    ctx.logger.info(
                        '%s ID# "%s" does not have force_operation '
                        'set but target ID "%s" is new, therefore '
                        'executing the relationship operation.' % (
                            resource_type,
                            ctx.target.instance.runtime_properties[EXT_RES_ID],
                            resource_id))
                elif not kwargs.get('force_operation', False):
                    return
                ctx.logger.warn('%s ID# "%s" has force_operation set.'
                                % (resource_type, resource_id))
            # Execute the function
            ret = function(**kwargs)
            # When modifying nested runtime properties, the internal
            # "dirty checking" mechanism will not know of our changes.
            # This forces the internal tracking to mark the properties as
            # dirty and will be refreshed on next query.
            # pylint: disable=W0212
            ctx.source.instance.runtime_properties._set_changed()
            ctx.target.instance.runtime_properties._set_changed()
            return ret
        return wrapper_inner
    return operation(func=wrapper_outer, resumable=True)
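# Usage sketch (hypothetical names): a relationship operation wired up with
# aws_relationship. A real plugin would pass its resource interface class
# as `class_decl`; here it is left as None, so `iface` is injected as None.
@aws_relationship(resource_type='Route Attachment')
def attach(ctx, iface, resource_config, resource_type, **_):
    # ctx, iface, resource_config and resource_type are all injected by the
    # decorator before this body runs.
    ctx.logger.info('Attaching %s with config %s'
                    % (resource_type, resource_config))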
def with_kubernetes_client(function):
    def wrapper(**kwargs):
        configuration_property = _retrieve_property(
            ctx.instance,
            NODE_PROPERTY_CONFIGURATION)

        authentication_property = _retrieve_property(
            ctx.instance,
            NODE_PROPERTY_AUTHENTICATION)

        try:
            kwargs['client'] = CloudifyKubernetesClient(
                ctx.logger,
                KubernetesApiConfigurationVariants(
                    ctx.logger,
                    configuration_property,
                    download_resource=ctx.download_resource),
                KubernetesApiAuthenticationVariants(
                    ctx.logger,
                    authentication_property))

            function(**kwargs)
        except KuberentesApiInitializationFailedError as e:
            error_traceback = generate_traceback_exception()
            ctx.logger.error(
                'Error traceback {0} with message {1}'.format(
                    error_traceback['traceback'],
                    error_traceback['message']))
            raise RecoverableError('{0}'.format(str(e)),
                                   causes=[error_traceback])
        except OperationRetry as e:
            error_traceback = generate_traceback_exception()
            ctx.logger.error(
                'Error traceback {0} with message {1}'.format(
                    error_traceback['traceback'],
                    error_traceback['message']))
            raise OperationRetry('{0}'.format(str(e)),
                                 retry_after=15,
                                 causes=[error_traceback])
        except NonRecoverableError as e:
            error_traceback = generate_traceback_exception()
            ctx.logger.error(
                'Error traceback {0} with message {1}'.format(
                    error_traceback['traceback'],
                    error_traceback['message']))
            raise NonRecoverableError('{0}'.format(str(e)),
                                      causes=[error_traceback])
        except Exception as e:
            error_traceback = generate_traceback_exception()
            ctx.logger.error(
                'Error traceback {0} with message {1}'.format(
                    error_traceback['traceback'],
                    error_traceback['message']))
            raise RecoverableError('{0}'.format(str(e)),
                                   causes=[error_traceback])

    return operation(func=wrapper, resumable=True)
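# Usage sketch (hypothetical operation name): with_kubernetes_client builds
# a CloudifyKubernetesClient from the node's configuration/authentication
# properties and injects it as the `client` keyword argument.
@with_kubernetes_client
def resource_create(client=None, **kwargs):
    # Real logic would use `client` to talk to the Kubernetes API; any
    # failure is translated into the Cloudify errors raised above.
    pass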
def operation_cleanup(func, force=False):
    def wrapper(*args, **kwargs):
        ctx = kwargs.get('ctx', CloudifyContext)
        # rerun operation in any case
        force_rerun = kwargs.get('force_rerun', force)
        # check current operation state
        if ctx.type == context.NODE_INSTANCE:
            current_action = ctx.operation.name
            operations_finished = ctx.instance.runtime_properties.get(
                FINISHED_OPERATIONS, {})
            if not force_rerun and operations_finished.get(current_action):
                ctx.logger.debug(
                    "Operation {operation} has already finished."
                    .format(operation=current_action))
                return
        # run the real operation
        result = func(*args, **kwargs)
        # check current operation
        if ctx.type == context.NODE_INSTANCE:
            current_action = ctx.operation.name
            if current_action == DELETE_NODE_ACTION:
                # cleanup runtime properties
                # need to convert the generator to a list (Python 3)
                for key, _ in list(ctx.instance.runtime_properties.items()):
                    del ctx.instance.runtime_properties[key]
            else:
                # mark operation as finished
                operations_finished = ctx.instance.runtime_properties.get(
                    FINISHED_OPERATIONS, {})
                operations_finished[current_action] = True
                # revert start on stop
                if current_action == STOP_NODE_ACTION:
                    operations_finished[START_NODE_ACTION] = False
                # copy flags back
                ctx.instance.runtime_properties[
                    FINISHED_OPERATIONS] = operations_finished
            # save flag as current state before external call
            ctx.instance.runtime_properties.dirty = True
            ctx.instance.update()
        return result
    return operation(func=wrapper, resumable=True)
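# Usage sketch (hypothetical operation name): once `configure` completes it
# is recorded under FINISHED_OPERATIONS and skipped on re-runs, unless
# force_rerun=True is passed or the delete operation clears the properties.
@operation_cleanup
def configure(ctx=None, **kwargs):
    ctx.logger.info('configure runs at most once per node instance')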
def check_swift_resource(func):
    def wrapper(**kwargs):
        ctx = kwargs['ctx']
        node_type = ctx.node.type
        if node_type and node_type.startswith(SWIFT_NODE_PREFIX):
            response = None
            swift_config = ctx.node.properties.get('swift_config')

            username = swift_config.get('swift_username')
            password = swift_config.get('swift_password')
            auth_url = swift_config.get('swift_auth_url')
            region_name = swift_config.get('swift_region_name')

            aws_config = {}
            # Only generate the token if it was not generated before
            if not ctx.instance.runtime_properties.get('aws_config'):
                endpoint_url, token = \
                    utils.generate_swift_access_config(auth_url,
                                                       username,
                                                       password)

                aws_config['aws_access_key_id'] = username
                aws_config['aws_secret_access_key'] = token
                aws_config['region_name'] = region_name
                aws_config['endpoint_url'] = endpoint_url
                ctx.instance.runtime_properties['aws_config'] = aws_config

            try:
                kwargs['aws_config'] = aws_config
                kwargs['ctx'] = ctx
                response = func(**kwargs)
            except ClientError as err:
                _, _, tb = sys.exc_info()
                error = err.response.get('Error')
                error_code = error.get('Code', 'Unknown')
                if error_code == SWIFT_ERROR_TOKEN_CODE:
                    endpoint_url, token = \
                        utils.generate_swift_access_config(auth_url,
                                                           username,
                                                           password)
                    # Reset the old "aws_config" and generate a new one
                    del ctx.instance.runtime_properties['aws_config']

                    aws_config = {}
                    aws_config['aws_access_key_id'] = username
                    aws_config['aws_secret_access_key'] = token
                    aws_config['region_name'] = region_name
                    aws_config['endpoint_url'] = endpoint_url
                    ctx.instance.runtime_properties['aws_config'] = \
                        aws_config

                    raise OperationRetry(
                        'Re-try the operation and generate a new token'
                        ' and endpoint url for the swift connection',
                        retry_after=10,
                        causes=[exception_to_error_cause(error, tb)])
            except Exception as error:
                error_traceback = utils.get_traceback_exception()
                raise NonRecoverableError('{0}'.format(text_type(error)),
                                          causes=[error_traceback])
            return response

        return func(**kwargs)
    return operation(func=wrapper, resumable=True)
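# Usage sketch (hypothetical operation name): for Swift-prefixed node types
# the decorator injects a token-based `aws_config`; other node types fall
# through to the undecorated call.
@check_swift_resource
def create_bucket(ctx, aws_config=None, **kwargs):
    ctx.logger.info('Creating bucket against endpoint {0}'.format(
        (aws_config or {}).get('endpoint_url')))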
def aws_resource(class_decl=None,
                 resource_type='AWS Resource',
                 ignore_properties=False):
    '''AWS resource decorator'''
    def wrapper_outer(function):
        '''Outer function'''
        def wrapper_inner(**kwargs):
            '''Inner, worker function'''
            ctx = kwargs['ctx']
            _, _, _, operation_name = ctx.operation.name.split('.')
            props = ctx.node.properties
            runtime_instance_properties = ctx.instance.runtime_properties
            # Override the resource ID if needed
            resource_id = kwargs.get(EXT_RES_ID)
            if resource_id and not \
                    ctx.instance.runtime_properties.get(EXT_RES_ID):
                ctx.instance.runtime_properties[EXT_RES_ID] = resource_id
            if resource_id and not \
                    ctx.instance.runtime_properties.get(EXT_RES_ARN):
                ctx.instance.runtime_properties[EXT_RES_ARN] = resource_id
            # Override any runtime properties if needed
            runtime_properties = kwargs.get('runtime_properties') or dict()
            for key, val in runtime_properties.items():
                ctx.instance.runtime_properties[key] = val
            # Add new operation arguments
            kwargs['resource_type'] = resource_type
            # Check if "aws_config" is provided.
            # If "client_config" is empty, then the current node is a swift
            # node and "aws_config" will be used as the boto3 config in
            # order to use the S3 API.
            aws_config = ctx.instance.runtime_properties.get('aws_config')
            aws_config_kwargs = kwargs.get('aws_config')

            # Attributes needed for the AWS resource class
            class_decl_attr = {
                'ctx_node': ctx.node,
                'logger': ctx.logger,
                'resource_id': utils.get_resource_id(node=ctx.node,
                                                     instance=ctx.instance),
            }

            # Check if "aws_config" is set and has a valid "dict" type,
            # because the expected data type for "aws_config" is "dict"
            if aws_config:
                if isinstance(aws_config, dict):
                    class_decl_attr.update({'aws_config': aws_config})
                else:
                    # Raise an error if the provided "aws_config" is not a
                    # valid dict data type
                    raise NonRecoverableError(
                        'aws_config is invalid type: {0}, it must be '
                        'valid dict type'.format(type(aws_config)))

            # Check the value of "aws_config" that could be part of
            # "kwargs"; it must pass the same validation as the
            # "aws_config" above
            elif aws_config_kwargs:
                if isinstance(aws_config_kwargs, dict):
                    class_decl_attr.update({'aws_config': aws_config_kwargs})
                else:
                    # Raise an error if the provided "aws_config_kwargs"
                    # is not a valid dict data type
                    raise NonRecoverableError(
                        'aws_config is invalid type: {0}, it must be '
                        'valid dict type'.format(type(aws_config_kwargs)))

            kwargs['iface'] = \
                class_decl(**class_decl_attr) if class_decl else None

            resource_config = None
            if not ignore_properties:
                # Normalize resource_config property
                resource_config = props.get('resource_config') or dict()
                resource_config_kwargs = \
                    resource_config.get('kwargs') or dict()
                if 'kwargs' in resource_config:
                    del resource_config['kwargs']
                resource_config.update(resource_config_kwargs)
                # Update the argument
                kwargs['resource_config'] = kwargs.get('resource_config') or \
                    resource_config or dict()

                # ``resource_config`` could be part of the runtime instance
                # properties; if ``resource_config`` is empty, then check
                # whether it exists on the runtime instance properties
                if not resource_config and runtime_instance_properties \
                        and runtime_instance_properties.get('resource_config'):
                    kwargs['resource_config'] = \
                        runtime_instance_properties['resource_config']
                    resource_config = kwargs['resource_config']

            resource_id = utils.get_resource_id(node=ctx.node,
                                                instance=ctx.instance)
            # Check if using external
            if ctx.node.properties.get('use_external_resource', False):
                ctx.logger.info('%s ID# "%s" is user-provided.'
                                % (resource_type, resource_id))
                if not kwargs.get('force_operation', False):
                    # If "force_operation" is not set, then we need to make
                    # sure that the runtime properties for the node
                    # instance are set correctly
                    # Set "resource_config" and "EXT_RES_ID"
                    ctx.instance.runtime_properties[
                        'resource_config'] = resource_config
                    ctx.instance.runtime_properties[EXT_RES_ID] = resource_id
                    if operation_name not in ['delete', 'create'] and \
                            not kwargs['iface'].verify_resource_exists():
                        raise NonRecoverableError(
                            'Resource type {0} resource_id '
                            '{1} not found.'.format(
                                kwargs['resource_type'],
                                kwargs['iface'].resource_id))
                    kwargs['iface'].populate_resource(ctx)
                    return
                ctx.logger.warn('%s ID# "%s" has force_operation set.'
                                % (resource_type, resource_id))
            result = function(**kwargs)
            if ctx.operation.name == 'cloudify.interfaces.lifecycle.configure':
                kwargs['iface'].populate_resource(ctx)
            if ctx.operation.name == 'cloudify.interfaces.lifecycle.delete':
                # cleanup runtime properties after delete
                keys = list(ctx.instance.runtime_properties.keys())
                for key in keys:
                    del ctx.instance.runtime_properties[key]
            return result
        return wrapper_inner
    return operation(func=wrapper_outer, resumable=True)
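# Usage sketch (hypothetical names): a typical create operation decorated
# with aws_resource. A real plugin would pass its resource interface class
# (exposing verify_resource_exists/populate_resource) as `class_decl`;
# with None, `iface` is injected as None.
@aws_resource(resource_type='EC2 Vpc')
def create(ctx, iface, resource_config, **_):
    # resource_config has already been normalized from the node properties
    # (its nested `kwargs` merged in) by the decorator.
    ctx.logger.info('Creating resource with config: {0}'
                    .format(resource_config))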
def _setup_env(
        self,
        workflow_methods=None,
        operation_methods=None,
        use_existing_env=True,
        name=None,
        inputs=None,
        create_blueprint_func=None,
        workflow_parameters_schema=None,
        load_env=False,
        ignored_modules=None,
        operation_retries=None,
        operation_retry_interval=None,
        provider_context=None,
):
    if create_blueprint_func is None:
        create_blueprint_func = self._blueprint_1

    def stub_op(ctx, **_):
        pass
    if operation_methods is None:
        operation_methods = [stub_op]
    if workflow_methods is None or workflow_methods[0] is None:
        def workflow_method(ctx, **_):
            instance = _instance(ctx, "node")
            instance.set_state("state").get()
            instance.execute_operation("test.op0")
        workflow_methods = [workflow_method]

    # same as @workflow above the method
    workflow_methods = [workflow(m) for m in workflow_methods]
    # same as @operation above each op method
    operation_methods = [operation(m) for m in operation_methods]

    temp_module = self._create_temp_module()
    for workflow_method in workflow_methods:
        setattr(temp_module, workflow_method.__name__, workflow_method)
    for operation_method in operation_methods:
        setattr(temp_module, operation_method.__name__, operation_method)

    blueprint = create_blueprint_func(
        workflow_methods,
        operation_methods,
        workflow_parameters_schema,
        ignored_modules,
        operation_retries,
        operation_retry_interval,
    )

    inner_dir = os.path.join(self.blueprint_dir, "inner")
    if not os.path.isdir(self.blueprint_dir):
        os.mkdir(self.blueprint_dir)
    if not os.path.isdir(inner_dir):
        os.mkdir(inner_dir)
    with open(os.path.join(inner_dir, "imported.yaml"), "w") as f:
        f.write("node_types: { imported_type: {} }")
    with open(os.path.join(self.blueprint_dir, "resource"), "w") as f:
        f.write("content")
    blueprint_path = os.path.join(self.blueprint_dir, "blueprint.yaml")
    with open(blueprint_path, "w") as f:
        f.write(yaml.safe_dump(blueprint))
    if not self.env or not use_existing_env:
        if load_env:
            self.env = self._load_env(name)
        else:
            self.env = self._init_env(
                blueprint_path,
                inputs=inputs,
                name=name,
                ignored_modules=ignored_modules,
                provider_context=provider_context,
            )
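# Usage sketch (hypothetical test name): a test case driving _setup_env
# with one custom workflow and one custom operation, matching the
# signatures expected above.
def test_runs_custom_operation(self):
    def my_workflow(ctx, **_):
        instance = _instance(ctx, "node")
        instance.execute_operation("test.op0")

    def my_operation(ctx, **_):
        ctx.logger.info("custom operation ran")

    self._setup_env(workflow_methods=[my_workflow],
                    operation_methods=[my_operation])
    # The local workflow environment is now available as self.env.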
def patch_custom_operation(self, new_operation):
    # celery caches tasks, so we force use of the stub _task
    global custom_operation
    custom_operation = operation(new_operation, force_not_celery=True)
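# Usage sketch (illustrative lambda): swapping in a replacement operation
# from within a test before re-running a workflow.
self.patch_custom_operation(lambda ctx, **_: ctx.logger.info('patched op'))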