def abi_decode(lll_node, src, pos=None):
    os = o_list(lll_node, pos=pos)
    lll_ret = ["seq"]
    parent_abi_t = abi_type_of(lll_node.typ)
    for i, o in enumerate(os):
        abi_t = abi_type_of(o.typ)
        src_loc = LLLnode("src_loc", typ=o.typ, location=src.location)
        if parent_abi_t.is_tuple():
            if abi_t.is_dynamic():
                child_loc = ["add", "src", unwrap_location(src_loc)]
                child_loc = LLLnode.from_list(child_loc, typ=o.typ, location=src.location)
            else:
                child_loc = src_loc
            # descend into the child tuple
            lll_ret.append(abi_decode(o, child_loc, pos=pos))
        else:
            lll_ret.append(make_setter(o, src_loc, location=o.location, pos=pos))

        if i + 1 == len(os):
            pass  # optimize out the last pointer increment
        else:
            sz = abi_t.embedded_static_size()
            lll_ret.append(["set", "src_loc", ["add", "src_loc", sz]])

    lll_ret = ["with", "src", src, ["with", "src_loc", "src", lll_ret]]

    return lll_ret

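# Worked example (illustrative, assuming the standard Solidity ABI head/tail layout):
# decoding a tuple (uint256, Bytes[32]) from `src`. The static ("head") section holds
# one slot per member; dynamic members store an offset into the buffer instead of the
# data itself:
#
#   src + 0x00: 0x...01        <- uint256 member, read in place
#   src + 0x20: 0x40           <- offset of the Bytes payload, relative to src
#   src + 0x40: 0x02           <- Bytes length word
#   src + 0x60: 0x6162...00    <- Bytes data ("ab"), right-padded to 32 bytes
#
# abi_decode walks the head with `src_loc`, advancing it by each member's
# embedded_static_size(); for dynamic members it recurses at `src + offset`.
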
def pack_logging_data(arg_nodes, arg_types, context, pos):
    # Checks to see if there's any data
    if not arg_nodes:
        return ["seq"], 0, None, 0

    holder = ["seq"]
    maxlen = len(arg_nodes) * 32  # total size of all packed args (upper limit)

    # Unroll any function calls, to temp variables.
    preallocated = {}
    for idx, node in enumerate(arg_nodes):
        if isinstance(node, (vy_ast.Str, vy_ast.Call)) and node.get("func.id") != "empty":
            expr = Expr(node, context)
            source_lll = expr.lll_node
            tmp_variable = context.new_internal_variable(source_lll.typ)
            tmp_variable_node = LLLnode.from_list(
                tmp_variable,
                typ=source_lll.typ,
                pos=getpos(node),
                location="memory",
                annotation=f"log_preallocated {source_lll.typ}",
            )
            # Copy bytes.
            holder.append(
                make_setter(tmp_variable_node, source_lll, pos=getpos(node), location="memory")
            )
            preallocated[idx] = tmp_variable_node

    # Create internal variables for dynamic and static args.
    static_types = []
    for typ in arg_types:
        static_types.append(typ if not typ.is_dynamic_size else Uint256Definition())

    requires_dynamic_offset = any(typ.is_dynamic_size for typ in arg_types)

    dynamic_offset_counter = None
    if requires_dynamic_offset:
        # TODO refactor out old type objects
        dynamic_offset_counter = context.new_internal_variable(BaseType(32))
        dynamic_placeholder = context.new_internal_variable(BaseType(32))

    static_vars = [context.new_internal_variable(i) for i in static_types]

    # Populate static placeholders.
    for i, (node, typ) in enumerate(zip(arg_nodes, arg_types)):
        placeholder = static_vars[i]
        if not isinstance(typ, ArrayValueAbstractType):
            holder, maxlen = pack_args_by_32(
                holder,
                maxlen,
                preallocated.get(i, node),
                typ,
                context,
                placeholder,
                pos=pos,
            )

    # Dynamic position starts right after the static args.
    if requires_dynamic_offset:
        holder.append(LLLnode.from_list(["mstore", dynamic_offset_counter, maxlen]))

    # Calculate maximum dynamic offset placeholders, used for gas estimation.
    for typ in arg_types:
        if typ.is_dynamic_size:
            maxlen += typ.size_in_bytes

    if requires_dynamic_offset:
        datamem_start = dynamic_placeholder + 32
    else:
        datamem_start = static_vars[0]

    # Copy necessary data into allocated dynamic section.
    for i, (node, typ) in enumerate(zip(arg_nodes, arg_types)):
        if isinstance(typ, ArrayValueAbstractType):
            if isinstance(node, vy_ast.Call) and node.func.get("id") == "empty":
                # TODO add support for this
                raise StructureException(
                    "Cannot use `empty` on Bytes or String types within an event log", node
                )
            pack_args_by_32(
                holder=holder,
                maxlen=maxlen,
                arg=preallocated.get(i, node),
                typ=typ,
                context=context,
                placeholder=static_vars[i],
                datamem_start=datamem_start,
                dynamic_offset_counter=dynamic_offset_counter,
                pos=pos,
            )

    return holder, maxlen, dynamic_offset_counter, datamem_start

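# Rough sketch of the buffer assembled above (illustrative; the details live in
# pack_args_by_32, which is not shown here): static args are packed into 32-byte
# placeholders in declaration order, while each dynamic (Bytes/String) arg keeps a
# placeholder slot and has its payload copied into the dynamic region that begins at
# `datamem_start`. `dynamic_offset_counter` tracks the next free position in that
# region, and `maxlen` is only an upper bound on the packed size, used for gas
# estimation rather than the exact encoded length.
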
def pack_logging_topics(event_id, arg_nodes, arg_types, context):
    topics = [event_id]
    for node, typ in zip(arg_nodes, arg_types):
        value = Expr(node, context).lll_node

        if isinstance(typ, ArrayValueAbstractType):
            if isinstance(node, (vy_ast.Str, vy_ast.Bytes)):
                # for literals, generate the topic at compile time
                value = node.value
                if isinstance(value, str):
                    value = value.encode()
                topics.append(bytes_to_int(keccak256(value)))

            elif value.location == "memory":
                topics.append(["sha3", ["add", value, 32], ["mload", value]])

            else:
                # storage or calldata
                placeholder = context.new_internal_variable(value.typ)
                placeholder_node = LLLnode.from_list(placeholder, typ=value.typ, location="memory")
                copier = make_byte_array_copier(
                    placeholder_node,
                    LLLnode.from_list("_sub", typ=value.typ, location=value.location),
                )
                lll_node = [
                    "with",
                    "_sub",
                    value,
                    ["seq", copier, ["sha3", ["add", placeholder, 32], ["mload", placeholder]]],
                ]
                topics.append(lll_node)

        elif isinstance(typ, ArrayDefinition):
            size = typ.size_in_bytes
            if value.location == "memory":
                topics.append(["sha3", value, size])
            else:
                # storage or calldata
                placeholder = context.new_internal_variable(value.typ)
                placeholder_node = LLLnode.from_list(placeholder, typ=value.typ, location="memory")
                setter = make_setter(placeholder_node, value, "memory", value.pos)
                lll_node = ["seq", setter, ["sha3", placeholder, size]]
                topics.append(lll_node)

        else:
            value = unwrap_location(value)
            topics.append(value)

    return topics

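# Example of the two topic paths above (illustrative): an indexed String literal such
# as "vyper" is hashed at compile time, roughly
#     bytes_to_int(keccak256(b"vyper"))
# whereas a runtime Bytes/String value is hashed by emitted code, e.g. for a value
# already in memory:
#     ["sha3", ["add", value, 32], ["mload", value]]
# i.e. keccak over the payload only, skipping the 32-byte length word. Fixed-size
# arrays are hashed over their full byte size, since they carry no length word.
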
def parse_internal_function(
    code: vy_ast.FunctionDef, sig: FunctionSignature, context: Context
) -> LLLnode:
    """
    Parse an internal function (FuncDef), and produce full function body.

    :param sig: the FunctionSignature
    :param code: ast of function
    :return: full sig compare & function body
    """

    func_type = code._metadata["type"]

    # Get nonreentrant lock
    nonreentrant_pre, nonreentrant_post = get_nonreentrant_lock(func_type)

    # Create callback_ptr, this stores a destination in the bytecode for an internal
    # function to jump to after a function has executed.
    clampers: List[LLLnode] = []

    # Allocate variable space.
    context.memory_allocator.expand_memory(sig.max_copy_size)

    _post_callback_ptr = f"{sig.name}_{sig.method_id}_post_callback_ptr"
    context.callback_ptr = context.new_internal_variable(typ=BaseType("uint256"))
    clampers.append(
        LLLnode.from_list(
            ["mstore", context.callback_ptr, "pass"],
            annotation="pop callback pointer",
        )
    )
    if sig.total_default_args > 0:
        clampers.append(LLLnode.from_list(["label", _post_callback_ptr]))

    # internal functions without return types need to jump back to
    # the calling function, as there is no return statement to handle the
    # jump.
    if sig.output_type is None:
        stop_func = [["jump", ["mload", context.callback_ptr]]]
    else:
        stop_func = [["stop"]]

    # Generate copiers
    if len(sig.base_args) == 0:
        copier = ["pass"]
        clampers.append(LLLnode.from_list(copier))
    elif sig.total_default_args == 0:
        copier = get_internal_arg_copier(
            total_size=sig.base_copy_size,
            memory_dest=MemoryPositions.RESERVED_MEMORY,
        )
        clampers.append(LLLnode.from_list(copier))

    # Fill variable positions
    for arg in sig.args:
        if isinstance(arg.typ, ByteArrayLike):
            mem_pos = context.memory_allocator.expand_memory(32 * get_size_of_type(arg.typ))
            context.vars[arg.name] = VariableRecord(arg.name, mem_pos, arg.typ, False)
        else:
            context.vars[arg.name] = VariableRecord(
                arg.name,
                MemoryPositions.RESERVED_MEMORY + arg.pos,
                arg.typ,
                False,
            )

    # internal function copiers. No clamping for internal functions.
    dyn_variable_names = [a.name for a in sig.base_args if isinstance(a.typ, ByteArrayLike)]
    if dyn_variable_names:
        i_placeholder = context.new_internal_variable(typ=BaseType("uint256"))
        unpackers: List[Any] = []
        for idx, var_name in enumerate(dyn_variable_names):
            var = context.vars[var_name]
            ident = f"_load_args_{sig.method_id}_dynarg{idx}"
            o = make_unpacker(ident=ident, i_placeholder=i_placeholder, begin_pos=var.pos)
            unpackers.append(o)

        if not unpackers:
            unpackers = ["pass"]

        # 0 added to complete full overarching 'seq' statement, see internal_label.
        unpackers.append(0)

        clampers.append(
            LLLnode.from_list(
                ["seq_unchecked"] + unpackers,
                typ=None,
                annotation="dynamic unpacker",
                pos=getpos(code),
            )
        )

    # Function has default arguments.
    if sig.total_default_args > 0:
        # Function with default parameters.
        default_sigs = sig_utils.generate_default_arg_sigs(code, context.sigs, context.global_ctx)
        sig_chain: List[Any] = ["seq"]

        for default_sig in default_sigs:
            sig_compare, internal_label = get_sig_statements(default_sig, getpos(code))

            # Populate unset default variables
            set_defaults = []
            for arg_name in get_default_names_to_set(sig, default_sig):
                value = Expr(sig.default_values[arg_name], context).lll_node
                var = context.vars[arg_name]
                left = LLLnode.from_list(
                    var.pos,
                    typ=var.typ,
                    location="memory",
                    pos=getpos(code),
                    mutable=var.mutable,
                )
                set_defaults.append(make_setter(left, value, "memory", pos=getpos(code)))

            current_sig_arg_names = [x.name for x in default_sig.args]

            # Load all variables in the default section, if internal,
            # because the stack is a linear pipe.
            copier_arg_count = len(default_sig.args)
            copier_arg_names = current_sig_arg_names

            # Order copier_arg_names, this is very important.
            copier_arg_names = [x.name for x in default_sig.args if x.name in copier_arg_names]

            # Variables to be populated from calldata/stack.
            default_copiers: List[Any] = []
            if copier_arg_count > 0:
                # Get map of variables in calldata, with their offsets
                offset = 4
                calldata_offset_map = {}
                for arg in default_sig.args:
                    calldata_offset_map[arg.name] = offset
                    offset += (
                        32 if isinstance(arg.typ, ByteArrayLike) else get_size_of_type(arg.typ) * 32
                    )

                # Copy set default parameters from calldata
                dynamics = []
                for arg_name in copier_arg_names:
                    var = context.vars[arg_name]
                    if isinstance(var.typ, ByteArrayLike):
                        _size = 32
                        dynamics.append(var.pos)
                    else:
                        _size = var.size * 32
                    default_copiers.append(
                        get_internal_arg_copier(
                            memory_dest=var.pos,
                            total_size=_size,
                        )
                    )

                # Unpack byte array if necessary.
                if dynamics:
                    i_placeholder = context.new_internal_variable(typ=BaseType("uint256"))
                    for idx, var_pos in enumerate(dynamics):
                        ident = f"unpack_default_sig_dyn_{default_sig.method_id}_arg{idx}"
                        default_copiers.append(
                            make_unpacker(
                                ident=ident,
                                i_placeholder=i_placeholder,
                                begin_pos=var_pos,
                            )
                        )

                default_copiers.append(0)  # for overarching seq, POP

            sig_chain.append(
                [
                    "if",
                    sig_compare,
                    [
                        "seq",
                        internal_label,
                        LLLnode.from_list(
                            ["mstore", context.callback_ptr, "pass"],
                            annotation="pop callback pointer",
                            pos=getpos(code),
                        ),
                        ["seq"] + set_defaults if set_defaults else ["pass"],
                        ["seq_unchecked"] + default_copiers if default_copiers else ["pass"],
                        ["goto", _post_callback_ptr],
                    ],
                ]
            )

        # With internal functions all variable loading occurs in the default
        # function subroutine.
        _clampers = [["label", _post_callback_ptr]]

        # Function with default parameters.
        return LLLnode.from_list(
            [
                "seq",
                sig_chain,
                ["seq"]
                + nonreentrant_pre
                + _clampers
                + [parse_body(c, context) for c in code.body]
                + nonreentrant_post
                + stop_func,
            ],
            typ=None,
            pos=getpos(code),
        )

    else:
        # Function without default parameters.
        sig_compare, internal_label = get_sig_statements(sig, getpos(code))
        return LLLnode.from_list(
            ["seq"]
            + [internal_label]
            + nonreentrant_pre
            + clampers
            + [parse_body(c, context) for c in code.body]
            + nonreentrant_post
            + stop_func,
            typ=None,
            pos=getpos(code),
        )

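# Calling convention sketch for internal functions (illustrative, inferred from the
# code above): the caller pushes a return destination on the stack before jumping to
# the callee's label. The callee's prologue stores that destination with
#     ["mstore", callback_ptr, "pass"]   # "pass" consumes the value already on the stack
# and, when the function has no return type, the epilogue jumps back via
#     ["jump", ["mload", callback_ptr]]
# Functions with a return type rely on their return statement to perform this jump,
# so they fall through to a plain ["stop"] instead.
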
def check_assign(lhs, rhs, pos, in_function_call=False):
    make_setter(lhs, rhs, location="memory", pos=pos, in_function_call=in_function_call)

def gen_tuple_return(stmt, context, sub):
    abi_typ = abi_type_of(context.return_type)
    # according to the ABI, return types are ALWAYS tuples even if
    # only one element is being returned.
    # https://solidity.readthedocs.io/en/latest/abi-spec.html#function-selector-and-argument-encoding
    # "and the return values v_1, ..., v_k of f are encoded as
    #
    #    enc((v_1, ..., v_k))
    #
    #  i.e. the values are combined into a tuple and encoded.
    # "
    # therefore, wrap it in a tuple if it's not already a tuple.
    # (big difference between returning `(bytes,)` and `bytes`.)
    abi_typ = ensure_tuple(abi_typ)
    abi_bytes_needed = abi_typ.static_size() + abi_typ.dynamic_size_bound()
    dst = context.memory_allocator.expand_memory(abi_bytes_needed)
    return_buffer = LLLnode(
        dst, location="memory", annotation="return_buffer", typ=context.return_type
    )

    check_assign(return_buffer, sub, pos=getpos(stmt))

    if sub.value == "multi":
        if isinstance(context.return_type, TupleType) and not abi_typ.dynamic_size_bound():
            # for tuples where every value is of the same type and a fixed length,
            # we can simplify the encoding by using make_setter, since
            # our memory encoding happens to be identical to the ABI
            # encoding.
            new_sub = LLLnode.from_list(
                context.new_internal_variable(context.return_type),
                typ=context.return_type,
                location="memory",
            )
            setter = make_setter(new_sub, sub, "memory", pos=getpos(stmt))
            return LLLnode.from_list(
                [
                    "seq",
                    setter,
                    make_return_stmt(
                        stmt,
                        context,
                        new_sub,
                        get_size_of_type(context.return_type) * 32,
                    ),
                ],
                typ=None,
                pos=getpos(stmt),
            )

        # in the multi case we can't create a variable to store the location of the return
        # expression, as multi can hold data from multiple locations (storage, calldata, etc.)
        encode_out = abi_encode(return_buffer, sub, pos=getpos(stmt), returns=True)
        load_return_len = ["mload", MemoryPositions.FREE_VAR_SPACE]
        os = [
            "seq",
            ["mstore", MemoryPositions.FREE_VAR_SPACE, encode_out],
            make_return_stmt(stmt, context, return_buffer, load_return_len),
        ]
        return LLLnode.from_list(os, typ=None, pos=getpos(stmt), valency=0)

    # for tuple return types where a function is called inside the tuple, we
    # process the calls prior to encoding the return data
    if sub.value == "seq_unchecked" and sub.args[-1].value == "multi":
        encode_out = abi_encode(return_buffer, sub.args[-1], pos=getpos(stmt), returns=True)
        load_return_len = ["mload", MemoryPositions.FREE_VAR_SPACE]
        os = (
            ["seq"]
            + sub.args[:-1]
            + [
                ["mstore", MemoryPositions.FREE_VAR_SPACE, encode_out],
                make_return_stmt(stmt, context, return_buffer, load_return_len),
            ]
        )
        return LLLnode.from_list(os, typ=None, pos=getpos(stmt), valency=0)

    # for all other cases we create a stack variable named sub_loc to store the location
    # of the return expression. This is done so that the return expression does not get
    # evaluated more than once: abi_encode uses a function named o_list which evaluates
    # the expression multiple times.
    sub_loc = LLLnode("sub_loc", typ=sub.typ, location=sub.location)
    encode_out = abi_encode(return_buffer, sub_loc, pos=getpos(stmt), returns=True)
    load_return_len = ["mload", MemoryPositions.FREE_VAR_SPACE]
    os = [
        "with",
        "sub_loc",
        sub,
        [
            "seq",
            ["mstore", MemoryPositions.FREE_VAR_SPACE, encode_out],
            make_return_stmt(stmt, context, return_buffer, load_return_len),
        ],
    ]
    return LLLnode.from_list(os, typ=None, pos=getpos(stmt), valency=0)

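# Note on the length plumbing above (illustrative): when called with returns=True,
# abi_encode evaluates to the number of bytes it wrote, so the pattern
#     ["mstore", MemoryPositions.FREE_VAR_SPACE, encode_out]
# runs the encoder and stashes that length in scratch space; make_return_stmt then
# reloads it with ["mload", MemoryPositions.FREE_VAR_SPACE] to size the RETURN data.
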
def abi_encode(dst, lll_node, pos=None, bufsz=None, returns=False):
    parent_abi_t = abi_type_of(lll_node.typ)
    size_bound = parent_abi_t.static_size() + parent_abi_t.dynamic_size_bound()
    if bufsz is not None and bufsz < 32 * size_bound:
        raise CompilerPanic("buffer provided to abi_encode not large enough")

    lll_ret = ["seq"]
    dyn_ofst = "dyn_ofst"  # current offset in the dynamic section
    dst_begin = "dst"  # pointer to beginning of buffer
    dst_loc = "dst_loc"  # pointer to write location in static section
    os = o_list(lll_node, pos=pos)

    for i, o in enumerate(os):
        abi_t = abi_type_of(o.typ)

        if parent_abi_t.is_tuple():
            if abi_t.is_dynamic():
                lll_ret.append(["mstore", dst_loc, dyn_ofst])
                # recurse
                child_dst = ["add", dst_begin, dyn_ofst]
                child = abi_encode(child_dst, o, pos=pos, returns=True)
                # increment dyn ofst for the return
                # (optimization note:
                #   if non-returning and this is the last dyn member in
                #   the tuple, this set can be elided.)
                lll_ret.append(["set", dyn_ofst, ["add", dyn_ofst, child]])
            else:
                # recurse
                lll_ret.append(abi_encode(dst_loc, o, pos=pos, returns=False))

        elif isinstance(o.typ, BaseType):
            d = LLLnode(dst_loc, typ=o.typ, location="memory")
            lll_ret.append(make_setter(d, o, location=d.location, pos=pos))
        elif isinstance(o.typ, ByteArrayLike):
            d = LLLnode.from_list(dst_loc, typ=o.typ, location="memory")
            lll_ret.append(
                ["seq", make_setter(d, o, location=d.location, pos=pos), zero_pad(d)]
            )
        else:
            raise CompilerPanic(f"unreachable type: {o.typ}")

        if i + 1 == len(os):
            pass  # optimize out the last increment to dst_loc
        else:  # note: always false for non-tuple types
            sz = abi_t.embedded_static_size()
            lll_ret.append(["set", dst_loc, ["add", dst_loc, sz]])

    # declare LLL variables.
    if returns:
        if not parent_abi_t.is_dynamic():
            lll_ret.append(parent_abi_t.embedded_static_size())
        elif parent_abi_t.is_tuple():
            lll_ret.append("dyn_ofst")
        elif isinstance(lll_node.typ, ByteArrayLike):
            # for abi purposes, return zero-padded length
            calc_len = ["ceil32", ["add", 32, ["mload", dst_loc]]]
            lll_ret.append(calc_len)
        else:
            raise CompilerPanic(f"unknown type {lll_node.typ}")

    if not (parent_abi_t.is_dynamic() and parent_abi_t.is_tuple()):
        pass  # optimize out dyn_ofst allocation if we don't need it
    else:
        dyn_section_start = parent_abi_t.static_size()
        lll_ret = ["with", "dyn_ofst", dyn_section_start, lll_ret]

    lll_ret = ["with", dst_begin, dst, ["with", dst_loc, dst_begin, lll_ret]]

    return LLLnode.from_list(lll_ret)

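# Worked example (illustrative, assuming the standard ABI head/tail layout): encoding
# a tuple (uint256, Bytes[32]) into `dst`. `dst_loc` walks the static (head) section
# one member at a time; `dyn_ofst` starts at parent_abi_t.static_size() (here 0x40)
# and marks where the next dynamic (tail) payload goes:
#
#   dst + 0x00: uint256 value        <- written via make_setter at dst_loc
#   dst + 0x20: 0x40                 <- head slot of the Bytes member: current dyn_ofst
#   dst + 0x40: length word + data   <- child abi_encode at ["add", dst, dyn_ofst]
#
# After each dynamic child, dyn_ofst is bumped by the child's encoded size, which is
# exactly what the recursive call evaluates to when returns=True.
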
def parse_external_function(
    code: vy_ast.FunctionDef,
    sig: FunctionSignature,
    context: Context,
    check_nonpayable: bool,
) -> LLLnode:
    """
    Parse an external function (FuncDef), and produce full function body.

    :param sig: the FunctionSignature
    :param code: ast of function
    :param check_nonpayable: if True, include a check that `msg.value == 0`
        at the beginning of the function
    :return: full sig compare & function body
    """

    func_type = code._metadata["type"]

    # Get nonreentrant lock
    nonreentrant_pre, nonreentrant_post = get_nonreentrant_lock(func_type)

    clampers = []

    # Generate copiers
    copier: List[Any] = ["pass"]
    if not len(sig.base_args):
        copier = ["pass"]
    elif sig.name == "__init__":
        copier = ["codecopy", MemoryPositions.RESERVED_MEMORY, "~codelen", sig.base_copy_size]
        context.memory_allocator.expand_memory(sig.max_copy_size)
    clampers.append(copier)

    if check_nonpayable and sig.mutability != "payable":
        # if the contract contains payable functions, but this is not one of them
        # add an assertion that the value of the call is zero
        clampers.append(["assert", ["iszero", "callvalue"]])

    # Fill variable positions
    default_args_start_pos = len(sig.base_args)
    for i, arg in enumerate(sig.args):
        if i < len(sig.base_args):
            clampers.append(
                make_arg_clamper(
                    arg.pos,
                    context.memory_allocator.get_next_memory_position(),
                    arg.typ,
                    sig.name == "__init__",
                )
            )
        if isinstance(arg.typ, ByteArrayLike):
            mem_pos = context.memory_allocator.expand_memory(32 * get_size_of_type(arg.typ))
            context.vars[arg.name] = VariableRecord(arg.name, mem_pos, arg.typ, False)
        else:
            if sig.name == "__init__":
                context.vars[arg.name] = VariableRecord(
                    arg.name,
                    MemoryPositions.RESERVED_MEMORY + arg.pos,
                    arg.typ,
                    False,
                )
            elif i >= default_args_start_pos:
                # default args need to be allocated in memory.
                type_size = get_size_of_type(arg.typ) * 32
                default_arg_pos = context.memory_allocator.expand_memory(type_size)
                context.vars[arg.name] = VariableRecord(
                    name=arg.name,
                    pos=default_arg_pos,
                    typ=arg.typ,
                    mutable=False,
                )
            else:
                context.vars[arg.name] = VariableRecord(
                    name=arg.name,
                    pos=4 + arg.pos,
                    typ=arg.typ,
                    mutable=False,
                    location="calldata",
                )

    # Create "clampers" (input well-formedness checkers)
    # Return function body
    if sig.name == "__init__":
        o = LLLnode.from_list(
            ["seq"] + clampers + [parse_body(code.body, context)],  # type: ignore
            pos=getpos(code),
        )
    # Is default function.
    elif sig.is_default_func():
        o = LLLnode.from_list(
            ["seq"] + clampers + [parse_body(code.body, context)] + [["stop"]],  # type: ignore
            pos=getpos(code),
        )
    # Is a normal function.
    else:
        # Function with default parameters.
        if sig.total_default_args > 0:
            function_routine = f"{sig.name}_{sig.method_id}"
            default_sigs = sig_utils.generate_default_arg_sigs(
                code, context.sigs, context.global_ctx
            )
            sig_chain: List[Any] = ["seq"]

            for default_sig in default_sigs:
                sig_compare, _ = get_sig_statements(default_sig, getpos(code))

                # Populate unset default variables
                set_defaults = []
                for arg_name in get_default_names_to_set(sig, default_sig):
                    value = Expr(sig.default_values[arg_name], context).lll_node
                    var = context.vars[arg_name]
                    left = LLLnode.from_list(
                        var.pos,
                        typ=var.typ,
                        location="memory",
                        pos=getpos(code),
                        mutable=var.mutable,
                    )
                    set_defaults.append(make_setter(left, value, "memory", pos=getpos(code)))

                current_sig_arg_names = {x.name for x in default_sig.args}
                base_arg_names = {arg.name for arg in sig.base_args}
                copier_arg_count = len(default_sig.args) - len(sig.base_args)
                copier_arg_names = list(current_sig_arg_names - base_arg_names)

                # Order copier_arg_names, this is very important.
                copier_arg_names = [x.name for x in default_sig.args if x.name in copier_arg_names]

                # Variables to be populated from calldata/stack.
                default_copiers: List[Any] = []
                if copier_arg_count > 0:
                    # Get map of variables in calldata, with their offsets
                    offset = 4
                    calldata_offset_map = {}
                    for arg in default_sig.args:
                        calldata_offset_map[arg.name] = offset
                        offset += (
                            32
                            if isinstance(arg.typ, ByteArrayLike)
                            else get_size_of_type(arg.typ) * 32
                        )

                    # Copy default parameters from calldata.
                    for arg_name in copier_arg_names:
                        var = context.vars[arg_name]
                        calldata_offset = calldata_offset_map[arg_name]

                        # Add clampers.
                        default_copiers.append(
                            make_arg_clamper(
                                calldata_offset - 4,
                                var.pos,
                                var.typ,
                            )
                        )
                        # Add copying code.
                        _offset: Union[int, List[Any]] = calldata_offset
                        if isinstance(var.typ, ByteArrayLike):
                            _offset = ["add", 4, ["calldataload", calldata_offset]]
                        default_copiers.append(
                            get_external_arg_copier(
                                memory_dest=var.pos,
                                total_size=var.size * 32,
                                offset=_offset,
                            )
                        )

                    default_copiers.append(0)  # for overarching seq, POP

                sig_chain.append(
                    [
                        "if",
                        sig_compare,
                        [
                            "seq",
                            ["seq"] + set_defaults if set_defaults else ["pass"],
                            ["seq_unchecked"] + default_copiers if default_copiers else ["pass"],
                            ["goto", function_routine],
                        ],
                    ]
                )

            # Function with default parameters.
            function_jump_label = f"{sig.name}_{sig.method_id}_skip"
            o = LLLnode.from_list(
                [
                    "seq",
                    sig_chain,
                    [
                        "seq",
                        ["goto", function_jump_label],
                        ["label", function_routine],
                        ["seq"]
                        + nonreentrant_pre
                        + clampers
                        + [parse_body(c, context) for c in code.body]
                        + nonreentrant_post
                        + [["stop"]],
                        ["label", function_jump_label],
                    ],
                ],
                typ=None,
                pos=getpos(code),
            )
        else:
            # Function without default parameters.
            sig_compare, _ = get_sig_statements(sig, getpos(code))
            o = LLLnode.from_list(
                [
                    "if",
                    sig_compare,
                    ["seq"]
                    + nonreentrant_pre
                    + clampers
                    + [parse_body(c, context) for c in code.body]
                    + nonreentrant_post
                    + [["stop"]],
                ],
                typ=None,
                pos=getpos(code),
            )

    return o

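# Dispatch sketch for default arguments (illustrative): a definition such as
#     def foo(a: uint256, b: uint256 = 0): ...
# is exposed under two ABI signatures, foo(uint256) and foo(uint256,uint256), each
# with its own method id. `sig_chain` holds one ["if", sig_compare, ...] branch per
# signature: the short variant writes the default value of `b` into memory, the full
# variant clamps and copies `b` from calldata, and both then ["goto"] the shared
# function_routine label so the function body itself is emitted only once.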