def ir_node_for_log(expr, event, topic_nodes, data_nodes, context):
    """Taking IR nodes as arguments, create the IR node for a Log statement.

    Arguments:
      expr: The original Log expression
      event: The Event type
      topic_nodes: list of IRnodes which calculate the event topics
      data_nodes: list of IRnodes which calculate the event data
      context: current memory/frame context
    """
    topics = _encode_log_topics(expr, event.event_id, topic_nodes, context)

    data = ir_tuple_from_args(data_nodes)

    bufsz = data.typ.abi_type.size_bound()
    buf = context.new_internal_variable(get_type_for_exact_size(bufsz))

    # encode_data is an IRnode which, cleverly, both encodes the data
    # and returns the length of the encoded data as a stack item.
    encode_data = abi_encode(buf, data, context, returns_len=True, bufsz=bufsz)

    assert len(topics) <= 4, "too many topics"  # sanity check
    log_opcode = "log" + str(len(topics))

    return IRnode.from_list(
        [log_opcode, buf, encode_data] + topics,
        add_gas_estimate=_gas_bound(len(topics), bufsz),
        annotation=f"LOG event {event.signature}",
    )
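# Illustrative only: a minimal, standalone sketch (not compiler output) of the
# s-expression shape the function above returns for an event with two indexed
# topics. The concrete values (buffer offset, placeholder topics) are made up.
_example_log_ir = [
    "log2",          # opcode name encodes the topic count (log0..log4)
    320,             # memory offset of the ABI-encoded data buffer
    ["seq", "..."],  # encode_data: writes the buffer and leaves its length on the stack
    "<topic0>",      # event_id (keccak of the event signature)
    "<topic1>",      # first indexed argument
]
assert _example_log_ir[0] == "log" + str(len(_example_log_ir) - 3)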
def _pack_arguments(contract_sig, args, context, pos):
    # abi encoding just treats all args as a big tuple
    args_tuple_t = TupleType([x.typ for x in args])
    args_as_tuple = LLLnode.from_list(["multi"] + [x for x in args], typ=args_tuple_t)
    args_abi_t = args_tuple_t.abi_type

    # sanity typecheck - make sure the arguments can be assigned
    dst_tuple_t = TupleType([arg.typ for arg in contract_sig.args][: len(args)])
    check_assign(dummy_node_for_type(dst_tuple_t), args_as_tuple)

    if contract_sig.return_type is not None:
        return_abi_t = calculate_type_for_external_return(contract_sig.return_type).abi_type

        # we use the same buffer for args and returndata,
        # so allocate enough space here for the returndata too.
        buflen = max(args_abi_t.size_bound(), return_abi_t.size_bound())
    else:
        buflen = args_abi_t.size_bound()

    buflen += 32  # padding for the method id

    buf_t = get_type_for_exact_size(buflen)
    buf = context.new_internal_variable(buf_t)

    args_ofst = buf + 28
    args_len = args_abi_t.size_bound() + 4

    abi_signature = contract_sig.name + dst_tuple_t.abi_type.selector_name()

    # layout:
    # 32 bytes                 | args
    # 0x..00<method_id_4bytes> | args
    # the reason for the left padding is just so the alignment is easier.
    # if we were only targeting constantinople, we could align
    # to buf (and also keep code size small) by using
    # (mstore buf (shl signature.method_id 224))
    mstore_method_id = [["mstore", buf, util.abi_method_id(abi_signature)]]

    if len(args) == 0:
        encode_args = ["pass"]
    else:
        encode_args = abi_encode(buf + 32, args_as_tuple, context, pos, bufsz=buflen)

    return buf, mstore_method_id + [encode_args], args_ofst, args_len
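# Illustrative only: a standalone sketch of the buffer layout built above, showing
# why args_ofst is buf + 28 and args_len adds 4 bytes for the selector. The selector
# bytes are the well-known ERC-20 `transfer(address,uint256)` method id; everything
# else is a hypothetical stand-in chosen just to demonstrate the offsets.
method_id = bytes.fromhex("a9059cbb")
head = method_id.rjust(32, b"\x00")   # what `mstore buf <method_id>` writes (left-padded word)
encoded_args = b"\x00" * 64           # stand-in for the ABI-encoded arguments at buf + 32
buf_contents = head + encoded_args

args_ofst = 28                        # buf + 28: where the 4 selector bytes begin
args_len = len(encoded_args) + 4      # selector + encoded args
assert buf_contents[args_ofst : args_ofst + args_len] == method_id + encoded_args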
def make_return_stmt(lll_val: LLLnode, stmt: Any, context: Context) -> Optional[LLLnode]:
    sig = context.sig

    jump_to_exit = ["exit_to", f"_sym_{sig.exit_sequence_label}"]

    _pos = getpos(stmt)

    if context.return_type is None:
        if stmt.value is not None:
            return None  # triggers an exception
    else:
        # sanity typecheck
        check_assign(dummy_node_for_type(context.return_type), lll_val)

    # helper function
    def finalize(fill_return_buffer):
        # do NOT bypass this. jump_to_exit may do important function cleanup.
        fill_return_buffer = LLLnode.from_list(
            fill_return_buffer, annotation=f"fill return buffer {sig._lll_identifier}"
        )
        cleanup_loops = "cleanup_repeat" if context.forvars else "pass"
        # NOTE: because stack analysis is incomplete, cleanup_repeat must
        # come after fill_return_buffer otherwise the stack will break
        return LLLnode.from_list(
            ["seq", fill_return_buffer, cleanup_loops, jump_to_exit],
            pos=_pos,
        )

    if context.return_type is None:
        jump_to_exit += ["return_pc"]
        return finalize(["pass"])

    if context.is_internal:
        dst = LLLnode.from_list(["return_buffer"], typ=context.return_type, location="memory")
        fill_return_buffer = make_setter(dst, lll_val, pos=_pos)
        jump_to_exit += ["return_pc"]

        return finalize(fill_return_buffer)

    else:  # return from external function
        lll_val = wrap_value_for_external_return(lll_val)

        external_return_type = calculate_type_for_external_return(context.return_type)
        maxlen = external_return_type.abi_type.size_bound()
        return_buffer_ofst = context.new_internal_variable(get_type_for_exact_size(maxlen))

        # encode_out is cleverly a sequence which does the abi-encoding and
        # also returns the length of the output as a stack element
        encode_out = abi_encode(
            return_buffer_ofst, lll_val, context, pos=_pos, returns_len=True, bufsz=maxlen
        )

        # previously, we would fill the return buffer and push its location and
        # length onto the stack inside the `seq_unchecked`, leaving them for the
        # function cleanup routine, which expects return_ofst and return_len to
        # be on the stack.
        # CMC introduced `goto` with args, which lets us replace `seq_unchecked`
        # with `seq` and simply append the arguments for the cleanup to the
        # `jump_to_exit` list.
        # see vyper/codegen/self_call.py for an example
        jump_to_exit += [return_buffer_ofst, encode_out]  # type: ignore

        return finalize(["pass"])
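# Illustrative only: a standalone sketch of the three `jump_to_exit` shapes the
# function above can produce. The label, offset, and encode_out node are
# placeholders, not real compiler output.
_exit_void = ["exit_to", "_sym_exit_label", "return_pc"]            # no return value
_exit_internal = ["exit_to", "_sym_exit_label", "return_pc"]        # buffer filled separately via make_setter
_exit_external = ["exit_to", "_sym_exit_label", 416, "<encode_out>"]  # return_ofst, then encode_out leaves the length
assert all(e[0] == "exit_to" for e in (_exit_void, _exit_internal, _exit_external))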
def make_return_stmt(ir_val: IRnode, stmt: Any, context: Context) -> Optional[IRnode]:
    sig = context.sig

    jump_to_exit = ["exit_to", f"_sym_{sig.exit_sequence_label}"]

    if context.return_type is None:
        if stmt.value is not None:
            return None  # triggers an exception
    else:
        # sanity typecheck
        check_assign(dummy_node_for_type(context.return_type), ir_val)

    # helper function
    def finalize(fill_return_buffer):
        # do NOT bypass this. jump_to_exit may do important function cleanup.
        fill_return_buffer = IRnode.from_list(
            fill_return_buffer, annotation=f"fill return buffer {sig._ir_identifier}"
        )
        cleanup_loops = "cleanup_repeat" if context.forvars else "pass"
        # NOTE: because stack analysis is incomplete, cleanup_repeat must
        # come after fill_return_buffer otherwise the stack will break
        return IRnode.from_list(["seq", fill_return_buffer, cleanup_loops, jump_to_exit])

    if context.return_type is None:
        jump_to_exit += ["return_pc"]
        return finalize(["pass"])

    if context.is_internal:
        dst = IRnode.from_list(["return_buffer"], typ=context.return_type, location=MEMORY)
        fill_return_buffer = make_setter(dst, ir_val)
        jump_to_exit += ["return_pc"]
        return finalize(fill_return_buffer)

    else:  # return from external function
        external_return_type = calculate_type_for_external_return(context.return_type)
        maxlen = external_return_type.abi_type.size_bound()

        # optimize: if the value already happens to be ABI encoded in
        # memory, don't bother running abi_encode, just return the
        # buffer it is in.
        can_skip_encode = (
            abi_encoding_matches_vyper(ir_val.typ)
            and ir_val.location == MEMORY
            # ensure it has already been validated - could be
            # unvalidated ABI encoded returndata for example
            and not needs_clamp(ir_val.typ, ir_val.encoding)
        )

        if can_skip_encode:
            assert ir_val.typ.memory_bytes_required == maxlen  # type: ignore
            jump_to_exit += [ir_val, maxlen]  # type: ignore
            return finalize(["pass"])

        ir_val = wrap_value_for_external_return(ir_val)

        # general case: abi_encode the data to a newly allocated buffer
        # and return the buffer
        return_buffer_ofst = context.new_internal_variable(get_type_for_exact_size(maxlen))

        # encode_out is cleverly a sequence which does the abi-encoding and
        # also returns the length of the output as a stack element
        return_len = abi_encode(return_buffer_ofst, ir_val, context, returns_len=True, bufsz=maxlen)

        # append ofst and len to exit_to the cleanup subroutine
        jump_to_exit += [return_buffer_ofst, return_len]  # type: ignore

        return finalize(["pass"])
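# Illustrative only: a toy restatement (outside the compiler) of the skip-encode
# condition above. `ToyVal`, its fields, and `can_skip_encode_sketch` are
# hypothetical stand-ins for abi_encoding_matches_vyper / location / needs_clamp.
from dataclasses import dataclass


@dataclass
class ToyVal:
    abi_matches_vyper_layout: bool  # stands in for abi_encoding_matches_vyper(typ)
    in_memory: bool                 # stands in for location == MEMORY
    needs_clamp: bool               # stands in for needs_clamp(typ, encoding)


def can_skip_encode_sketch(v: ToyVal) -> bool:
    # already laid out exactly as the ABI requires, lives in memory, and has
    # already been validated, so returning the existing buffer is safe
    return v.abi_matches_vyper_layout and v.in_memory and not v.needs_clamp


assert can_skip_encode_sketch(ToyVal(True, True, False))
assert not can_skip_encode_sketch(ToyVal(True, True, True))  # unvalidated returndata must be clamped first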