def parse_return(self):
    """Generate LLL for a `return` statement.

    Validates the returned expression against `self.context.return_type`,
    then dispatches on the expression's LLL type: BaseType, ByteArrayLike,
    ListType, StructType and TupleType each get their own encoding path.
    Anything else raises TypeMismatchException.
    """
    # A function with no declared return type must not return a value,
    # and still emits an (empty) return statement.
    if self.context.return_type is None:
        if self.stmt.value:
            raise TypeMismatchException("Not expecting to return a value", self.stmt)
        return LLLnode.from_list(
            make_return_stmt(self.stmt, self.context, 0, 0),
            typ=None,
            pos=getpos(self.stmt),
            valency=0,
        )
    if not self.stmt.value:
        raise TypeMismatchException("Expecting to return a value", self.stmt)

    def zero_pad(bytez_placeholder, maxlen):
        # Zero out the tail of the byte array at `bytez_placeholder` up to
        # `maxlen`, so no stale memory leaks into the returned payload.
        zero_padder = LLLnode.from_list(['pass'])
        if maxlen > 0:
            # Iterator used to zero pad memory.
            zero_pad_i = self.context.new_placeholder(BaseType('uint256'))
            zero_padder = LLLnode.from_list(
                [
                    'with', '_ceil32_end', ['ceil32', ['mload', bytez_placeholder]],
                    [
                        'repeat', zero_pad_i, ['mload', bytez_placeholder], maxlen,
                        [
                            'seq',
                            # stay within allocated bounds
                            [
                                'if',
                                ['gt', ['mload', zero_pad_i], '_ceil32_end'],
                                'break'
                            ],
                            [
                                'mstore8',
                                [
                                    'add',
                                    ['add', 32, bytez_placeholder],
                                    ['mload', zero_pad_i]
                                ],
                                0
                            ],
                        ],
                    ],
                ],
                annotation="Zero pad")
        return zero_padder

    sub = Expr(self.stmt.value, self.context).lll_node

    # Returning a value (most common case)
    if isinstance(sub.typ, BaseType):
        sub = unwrap_location(sub)
        if not isinstance(self.context.return_type, BaseType):
            raise TypeMismatchException(
                "Return type units mismatch %r %r" % (
                    sub.typ,
                    self.context.return_type,
                ),
                self.stmt.value)
        elif self.context.return_type != sub.typ and not sub.typ.is_literal:
            raise TypeMismatchException(
                "Trying to return base type %r, output expecting %r" % (
                    sub.typ,
                    self.context.return_type,
                ),
                self.stmt.value,
            )
        elif sub.typ.is_literal and (
                self.context.return_type.typ == sub.typ or
                'int' in self.context.return_type.typ and
                'int' in sub.typ.typ):  # noqa: E501
            # Literal of a compatible int flavor: range-check at compile time,
            # then store at memory slot 0 and return 32 bytes.
            if not SizeLimits.in_bounds(self.context.return_type.typ, sub.value):
                raise InvalidLiteralException(
                    "Number out of range: " + str(sub.value), self.stmt)
            else:
                return LLLnode.from_list(
                    [
                        'seq',
                        ['mstore', 0, sub],
                        make_return_stmt(self.stmt, self.context, 0, 32)
                    ],
                    typ=None,
                    pos=getpos(self.stmt),
                    valency=0,
                )
        elif is_base_type(sub.typ, self.context.return_type.typ) or (
                is_base_type(sub.typ, 'int128') and
                is_base_type(self.context.return_type, 'int256')):  # noqa: E501
            # Exact base-type match (or the allowed int128 -> int256 widening).
            return LLLnode.from_list(
                [
                    'seq',
                    ['mstore', 0, sub],
                    make_return_stmt(self.stmt, self.context, 0, 32)
                ],
                typ=None,
                pos=getpos(self.stmt),
                valency=0,
            )
        else:
            raise TypeMismatchException(
                "Unsupported type conversion: %r to %r" % (
                    sub.typ,
                    self.context.return_type,
                ),
                self.stmt.value,
            )
    # Returning a byte array
    elif isinstance(sub.typ, ByteArrayLike):
        if not sub.typ.eq_base(self.context.return_type):
            raise TypeMismatchException(
                "Trying to return base type %r, output expecting %r" % (
                    sub.typ,
                    self.context.return_type,
                ),
                self.stmt.value,
            )
        if sub.typ.maxlen > self.context.return_type.maxlen:
            raise TypeMismatchException(
                "Cannot cast from greater max-length %d to shorter max-length %d" % (
                    sub.typ.maxlen,
                    self.context.return_type.maxlen,
                ),
                self.stmt.value,
            )

        # loop memory has to be allocated first.
        loop_memory_position = self.context.new_placeholder(typ=BaseType('uint256'))
        # len & bytez placeholder have to be declared after each other at all times.
        len_placeholder = self.context.new_placeholder(typ=BaseType('uint256'))
        bytez_placeholder = self.context.new_placeholder(typ=sub.typ)

        if sub.location in ('storage', 'memory'):
            # Copy into the placeholder, zero the tail, prefix with the
            # ABI offset word (32), then return len + data (ceil32-rounded).
            return LLLnode.from_list([
                'seq',
                make_byte_array_copier(
                    LLLnode(bytez_placeholder, location='memory', typ=sub.typ),
                    sub,
                    pos=getpos(self.stmt)),
                zero_pad(bytez_placeholder, sub.typ.maxlen),
                ['mstore', len_placeholder, 32],
                make_return_stmt(
                    self.stmt,
                    self.context,
                    len_placeholder,
                    ['ceil32', ['add', ['mload', bytez_placeholder], 64]],
                    loop_memory_position=loop_memory_position,
                )
            ], typ=None, pos=getpos(self.stmt), valency=0)
        else:
            raise Exception("Invalid location: %s" % sub.location)

    elif isinstance(sub.typ, ListType):
        # Compare only the base name of the element types (text before any
        # '(' or '[' in their repr), not full type equality.
        sub_base_type = re.split(r'\(|\[', str(sub.typ.subtype))[0]
        ret_base_type = re.split(r'\(|\[', str(self.context.return_type.subtype))[0]
        loop_memory_position = self.context.new_placeholder(typ=BaseType('uint256'))
        if sub_base_type != ret_base_type:
            raise TypeMismatchException(
                "List return type %r does not match specified return type, expecting %r" % (
                    sub_base_type,
                    ret_base_type,
                ), self.stmt)
        elif sub.location == "memory" and sub.value != "multi":
            # Already laid out contiguously in memory: return it in place.
            return LLLnode.from_list(
                make_return_stmt(
                    self.stmt,
                    self.context,
                    sub,
                    get_size_of_type(self.context.return_type) * 32,
                    loop_memory_position=loop_memory_position,
                ),
                typ=None,
                pos=getpos(self.stmt),
                valency=0,
            )
        else:
            # Materialize (e.g. a literal 'multi' node) into fresh memory first.
            new_sub = LLLnode.from_list(
                self.context.new_placeholder(self.context.return_type),
                typ=self.context.return_type,
                location='memory',
            )
            setter = make_setter(new_sub, sub, 'memory', pos=getpos(self.stmt))
            return LLLnode.from_list([
                'seq',
                setter,
                make_return_stmt(
                    self.stmt,
                    self.context,
                    new_sub,
                    get_size_of_type(self.context.return_type) * 32,
                    loop_memory_position=loop_memory_position,
                )
            ], typ=None, pos=getpos(self.stmt))

    # Returning a struct
    elif isinstance(sub.typ, StructType):
        retty = self.context.return_type
        if not isinstance(retty, StructType) or retty.name != sub.typ.name:
            raise TypeMismatchException(
                "Trying to return %r, output expecting %r" % (
                    sub.typ,
                    self.context.return_type,
                ),
                self.stmt.value,
            )
        return gen_tuple_return(self.stmt, self.context, sub)

    # Returning a tuple.
    elif isinstance(sub.typ, TupleType):
        if not isinstance(self.context.return_type, TupleType):
            raise TypeMismatchException(
                "Trying to return tuple type %r, output expecting %r" % (
                    sub.typ,
                    self.context.return_type,
                ),
                self.stmt.value,
            )

        if len(self.context.return_type.members) != len(sub.typ.members):
            raise StructureException("Tuple lengths don't match!", self.stmt)

        # check return type matches, sub type.
        for i, ret_x in enumerate(self.context.return_type.members):
            s_member = sub.typ.members[i]
            # Members may be raw NodeTypes or wrappers carrying a .typ.
            sub_type = s_member if isinstance(s_member, NodeType) else s_member.typ
            if type(sub_type) is not type(ret_x):
                raise StructureException(
                    "Tuple return type does not match annotated return. {} != {}"
                    .format(type(sub_type), type(ret_x)),
                    self.stmt)
        return gen_tuple_return(self.stmt, self.context, sub)
    else:
        raise TypeMismatchException("Can't return type %r" % sub.typ, self.stmt)
def to_int128(expr, args, kwargs, context):
    """Convert `args[0]` to an int128 LLL node.

    Dispatches on the input's detected type: numeric literals are
    range-checked at compile time; runtime values are clamped (or
    sign-extended, for addresses) into int128 range.

    Raises InvalidLiteral for out-of-range literals or unsupported inputs,
    and TypeMismatch for byte arrays longer than 32 bytes.
    """
    in_arg = args[0]
    input_type, _ = get_type(in_arg)

    if input_type == "num_literal":
        if isinstance(in_arg, int):
            if not SizeLimits.in_bounds("int128", in_arg):
                # Fix: pass `expr` so the error carries a source location,
                # consistent with every other raise in this function.
                raise InvalidLiteral(f"Number out of range: {in_arg}", expr)
            return LLLnode.from_list(in_arg, typ=BaseType("int128"), pos=getpos(expr))
        elif isinstance(in_arg, Decimal):
            # Truncate toward zero before the bounds check.
            if not SizeLimits.in_bounds("int128", math.trunc(in_arg)):
                raise InvalidLiteral(f"Number out of range: {math.trunc(in_arg)}", expr)
            return LLLnode.from_list(
                math.trunc(in_arg), typ=BaseType("int128"), pos=getpos(expr))
        else:
            raise InvalidLiteral(f"Unknown numeric literal type: {in_arg}", expr)

    elif input_type == "bytes32":
        if in_arg.typ.is_literal:
            if not SizeLimits.in_bounds("int128", in_arg.value):
                raise InvalidLiteral(f"Number out of range: {in_arg.value}", expr)
            else:
                return LLLnode.from_list(in_arg, typ=BaseType("int128"), pos=getpos(expr))
        else:
            # Runtime value: clamp into int128 range.
            return LLLnode.from_list(
                int128_clamp(in_arg),
                typ=BaseType("int128"),
                pos=getpos(expr),
            )

    elif input_type == "address":
        # Addresses are 160 bits: mask to address width, then sign-extend
        # from byte 15 (the int128 boundary).
        return LLLnode.from_list(
            ["signextend", 15, ["and", in_arg, (SizeLimits.ADDRSIZE - 1)]],
            typ=BaseType("int128"),
            pos=getpos(expr),
        )

    elif input_type in ("String", "Bytes"):
        if in_arg.typ.maxlen > 32:
            raise TypeMismatch(
                f"Cannot convert bytes array of max length {in_arg.typ.maxlen} to int128",
                expr,
            )
        return byte_array_to_num(in_arg, expr, "int128")

    elif input_type == "uint256":
        if in_arg.typ.is_literal:
            if not SizeLimits.in_bounds("int128", in_arg.value):
                raise InvalidLiteral(f"Number out of range: {in_arg.value}", expr)
            else:
                return LLLnode.from_list(in_arg, typ=BaseType("int128"), pos=getpos(expr))
        else:
            # Unsigned clamp against the MAX_INT128 constant in memory.
            return LLLnode.from_list(
                ["uclample", in_arg, ["mload", MemoryPositions.MAX_INT128]],
                typ=BaseType("int128"),
                pos=getpos(expr),
            )

    elif input_type == "decimal":
        # Drop the fractional part (signed division by the decimal scale),
        # then clamp into int128 range.
        return LLLnode.from_list(
            int128_clamp(["sdiv", in_arg, DECIMAL_DIVISOR]),
            typ=BaseType("int128"),
            pos=getpos(expr),
        )

    elif input_type == "bool":
        # Bools are already 0/1; reinterpret directly.
        return LLLnode.from_list(in_arg, typ=BaseType("int128"), pos=getpos(expr))

    else:
        raise InvalidLiteral(f"Invalid input for int128: {in_arg}", expr)
def parse_Attribute(self):
    """Lower an attribute access to LLL.

    Handles the builtin address members (`balance`, `codesize`,
    `is_contract`, `codehash`), `self.<global>` storage access, the
    reserved environment variables (`msg.*`, `block.*`, `tx.*`,
    `chain.id`), and member access on interface/struct values.
    Falls through (returning None) when no case matches.
    """
    # x.balance: balance of address x
    if self.expr.attr == "balance":
        addr = Expr.parse_value_expr(self.expr.value, self.context)
        if is_base_type(addr.typ, "address"):
            if (isinstance(self.expr.value, vy_ast.Name)
                    and self.expr.value.id == "self"
                    and version_check(begin="istanbul")):
                # SELFBALANCE (EIP-1884) is cheaper than BALANCE(self).
                seq = ["selfbalance"]
            else:
                seq = ["balance", addr]
            return LLLnode.from_list(
                seq,
                typ=BaseType("uint256"),
                location=None,
                pos=getpos(self.expr),
            )
    # x.codesize: codesize of address x
    elif self.expr.attr == "codesize" or self.expr.attr == "is_contract":
        addr = Expr.parse_value_expr(self.expr.value, self.context)
        if is_base_type(addr.typ, "address"):
            if self.expr.attr == "codesize":
                # Fix: guard the `.id` access with an isinstance check (as the
                # `balance` branch above does) — `self.expr.value` need not be
                # a Name node (e.g. `some_struct.addr.codesize`), and bare
                # `.id` access would raise AttributeError in that case.
                if (isinstance(self.expr.value, vy_ast.Name)
                        and self.expr.value.id == "self"):
                    eval_code = ["codesize"]
                else:
                    eval_code = ["extcodesize", addr]
                output_type = "uint256"
            else:
                eval_code = ["gt", ["extcodesize", addr], 0]
                output_type = "bool"
            return LLLnode.from_list(
                eval_code,
                typ=BaseType(output_type),
                location=None,
                pos=getpos(self.expr),
            )
    # x.codehash: keccak of address x
    elif self.expr.attr == "codehash":
        addr = Expr.parse_value_expr(self.expr.value, self.context)
        if not version_check(begin="constantinople"):
            raise EvmVersionException(
                "address.codehash is unavailable prior to constantinople ruleset", self.expr
            )
        if is_base_type(addr.typ, "address"):
            return LLLnode.from_list(
                ["extcodehash", addr],
                typ=BaseType("bytes32"),
                location=None,
                pos=getpos(self.expr),
            )
    # self.x: global attribute
    elif isinstance(self.expr.value, vy_ast.Name) and self.expr.value.id == "self":
        var = self.context.globals[self.expr.attr]
        return LLLnode.from_list(
            var.pos,
            typ=var.typ,
            location="storage",
            pos=getpos(self.expr),
            annotation="self." + self.expr.attr,
        )
    # Reserved keywords
    elif (isinstance(self.expr.value, vy_ast.Name)
            and self.expr.value.id in ENVIRONMENT_VARIABLES):
        key = f"{self.expr.value.id}.{self.expr.attr}"
        if key == "msg.sender" and not self.context.is_internal:
            return LLLnode.from_list(["caller"], typ="address", pos=getpos(self.expr))
        elif key == "msg.data" and not self.context.is_internal:
            # `len(msg.data)` vs a slice of msg.data — metadata set upstream.
            is_len = self.expr._metadata.get("is_len")
            if is_len is True:
                typ = ByteArrayType(32)
                pos = self.context.new_internal_variable(typ)
                node = ["seq", ["mstore", pos, "calldatasize"], pos]
                return LLLnode.from_list(
                    node, typ=typ, pos=getpos(self.expr), location="memory")
            size = self.expr._metadata.get("size")
            typ = ByteArrayType(size + 32)
            pos = self.context.new_internal_variable(typ)
            node = [
                "seq",
                ["assert", ["le", size, "calldatasize"]],
                ["mstore", pos, size],
                ["calldatacopy", pos + 32, 0, size],
                pos,
            ]
            return LLLnode.from_list(
                node, typ=typ, pos=getpos(self.expr), location="memory")
        elif key == "msg.value" and self.context.is_payable:
            return LLLnode.from_list(
                ["callvalue"],
                typ=BaseType("uint256"),
                pos=getpos(self.expr),
            )
        elif key == "msg.gas":
            return LLLnode.from_list(
                ["gas"],
                typ="uint256",
                pos=getpos(self.expr),
            )
        elif key == "block.difficulty":
            return LLLnode.from_list(
                ["difficulty"],
                typ="uint256",
                pos=getpos(self.expr),
            )
        elif key == "block.timestamp":
            return LLLnode.from_list(
                ["timestamp"],
                typ=BaseType("uint256"),
                pos=getpos(self.expr),
            )
        elif key == "block.coinbase":
            return LLLnode.from_list(["coinbase"], typ="address", pos=getpos(self.expr))
        elif key == "block.number":
            return LLLnode.from_list(["number"], typ="uint256", pos=getpos(self.expr))
        elif key == "block.prevhash":
            return LLLnode.from_list(
                ["blockhash", ["sub", "number", 1]],
                typ="bytes32",
                pos=getpos(self.expr),
            )
        elif key == "tx.origin":
            return LLLnode.from_list(["origin"], typ="address", pos=getpos(self.expr))
        elif key == "chain.id":
            if not version_check(begin="istanbul"):
                raise EvmVersionException(
                    "chain.id is unavailable prior to istanbul ruleset", self.expr
                )
            return LLLnode.from_list(["chainid"], typ="uint256", pos=getpos(self.expr))
    # Other variables
    else:
        sub = Expr.parse_variable_location(self.expr.value, self.context)
        # contract type
        if isinstance(sub.typ, InterfaceType):
            return sub
        if isinstance(sub.typ, StructType) and self.expr.attr in sub.typ.members:
            return add_variable_offset(sub, self.expr.attr, pos=getpos(self.expr))
def pack_logging_data(expected_data, args, context, pos):
    """Pack event data arguments into memory for a LOG operation.

    Returns a 4-tuple:
      (holder LLL sequence, max byte length, dynamic-offset counter
       placeholder or None, start of the packed data region).
    """
    # Checks to see if there's any data
    if not args:
        return ['seq'], 0, None, 0
    holder = ['seq']
    maxlen = len(args) * 32  # total size of all packed args (upper limit)

    # Unroll any function calls, to temp variables.
    # NOTE(review): "prealloacted" is a long-standing typo for "preallocated";
    # kept as-is since this edit is documentation-only.
    prealloacted = {}
    for idx, (arg, _expected_arg) in enumerate(zip(args, expected_data)):
        if isinstance(arg, (ast.Str, ast.Call)):
            expr = Expr(arg, context)
            source_lll = expr.lll_node
            typ = source_lll.typ

            if isinstance(arg, ast.Str):
                if len(arg.s) > typ.maxlen:
                    raise TypeMismatchException(
                        "Data input bytes are to big: %r %r" % (len(arg.s), typ), pos)

            # Evaluate once into a named temp so later packing passes can
            # reference the result instead of re-evaluating the expression.
            tmp_variable = context.new_variable(
                '_log_pack_var_%i_%i' % (arg.lineno, arg.col_offset),
                source_lll.typ,
            )
            tmp_variable_node = LLLnode.from_list(
                tmp_variable,
                typ=source_lll.typ,
                pos=getpos(arg),
                location="memory",
                annotation='log_prealloacted %r' % source_lll.typ,
            )
            # Store len.
            # holder.append(['mstore', len_placeholder, ['mload', unwrap_location(source_lll)]])
            # Copy bytes.
            holder.append(
                make_setter(tmp_variable_node, source_lll, pos=getpos(arg), location='memory'))
            prealloacted[idx] = tmp_variable_node

    # A dynamic section is only needed when at least one arg is byte-array-like.
    requires_dynamic_offset = any(
        [isinstance(data.typ, ByteArrayLike) for data in expected_data])
    if requires_dynamic_offset:
        # Iterator used to zero pad memory.
        zero_pad_i = context.new_placeholder(BaseType('uint256'))
        # NOTE(review): BaseType(32) rather than BaseType('uint256') —
        # presumably historical; confirm against BaseType's constructor.
        dynamic_offset_counter = context.new_placeholder(BaseType(32))
        dynamic_placeholder = context.new_placeholder(BaseType(32))
    else:
        dynamic_offset_counter = None
        zero_pad_i = None

    # Create placeholder for static args. Note: order of new_*() is important.
    placeholder_map = {}
    for i, (_arg, data) in enumerate(zip(args, expected_data)):
        typ = data.typ
        if not isinstance(typ, ByteArrayLike):
            placeholder = context.new_placeholder(typ)
        else:
            # Byte arrays occupy one 32-byte offset slot in the static section.
            placeholder = context.new_placeholder(BaseType(32))
        placeholder_map[i] = placeholder

    # Populate static placeholders.
    for i, (arg, data) in enumerate(zip(args, expected_data)):
        typ = data.typ
        placeholder = placeholder_map[i]
        if not isinstance(typ, ByteArrayLike):
            holder, maxlen = pack_args_by_32(
                holder,
                maxlen,
                prealloacted.get(i, arg),
                typ,
                context,
                placeholder,
                zero_pad_i=zero_pad_i,
                pos=pos,
            )

    # Dynamic position starts right after the static args.
    if requires_dynamic_offset:
        holder.append(LLLnode.from_list(['mstore', dynamic_offset_counter, maxlen]))

    # Calculate maximum dynamic offset placeholders, used for gas estimation.
    for _arg, data in zip(args, expected_data):
        typ = data.typ
        if isinstance(typ, ByteArrayLike):
            maxlen += 32 + ceil32(typ.maxlen)

    if requires_dynamic_offset:
        datamem_start = dynamic_placeholder + 32
    else:
        datamem_start = placeholder_map[0]

    # Copy necessary data into allocated dynamic section.
    for i, (arg, data) in enumerate(zip(args, expected_data)):
        typ = data.typ
        if isinstance(typ, ByteArrayLike):
            pack_args_by_32(
                holder=holder,
                maxlen=maxlen,
                arg=prealloacted.get(i, arg),
                typ=typ,
                context=context,
                placeholder=placeholder_map[i],
                datamem_start=datamem_start,
                dynamic_offset_counter=dynamic_offset_counter,
                zero_pad_i=zero_pad_i,
                pos=pos)

    return holder, maxlen, dynamic_offset_counter, datamem_start
def call_self_private(stmt_expr, context, sig):
    """Generate LLL for a call to a private function within the same contract.

    Private calls are implemented as an internal jump: the caller saves its
    memory-resident locals on the stack, pushes the arguments and a callback
    pointer, jumps to the callee's label, then pops return values and
    restores its locals. Statement order here mirrors that stack discipline
    exactly — do not reorder.
    """
    # ** Private Call **
    # Steps:
    # (x) push current local variables
    # (x) push arguments
    # (x) push jumpdest (callback ptr)
    # (x) jump to label
    # (x) pop return values
    # (x) pop local variables

    method_name, expr_args, sig = call_lookup_specs(stmt_expr, context)
    pre_init = []
    pop_local_vars = []
    push_local_vars = []
    pop_return_values = []
    push_args = []

    # Push local variables.
    var_slots = [
        (v.pos, v.size) for name, v in context.vars.items()
        if v.location == 'memory'
    ]
    if var_slots:
        var_slots.sort(key=lambda x: x[0])
        # [mem_from, mem_to) spans every memory-resident local.
        mem_from, mem_to = var_slots[0][0], var_slots[-1][0] + var_slots[-1][1] * 32

        i_placeholder = context.new_placeholder(BaseType('uint256'))
        local_save_ident = "_%d_%d" % (stmt_expr.lineno, stmt_expr.col_offset)
        push_loop_label = 'save_locals_start' + local_save_ident
        pop_loop_label = 'restore_locals_start' + local_save_ident

        if mem_to - mem_from > 320:
            # Large region (> 10 words): use a runtime loop instead of
            # unrolled mload/mstore pairs.
            push_local_vars = [
                ['mstore', i_placeholder, mem_from],
                ['label', push_loop_label],
                ['mload', ['mload', i_placeholder]],
                ['mstore', i_placeholder, ['add', ['mload', i_placeholder], 32]],
                ['if', ['lt', ['mload', i_placeholder], mem_to],
                    ['goto', push_loop_label]]
            ]
            # Pop in reverse order (LIFO) to restore the same slots.
            pop_local_vars = [
                ['mstore', i_placeholder, mem_to - 32],
                ['label', pop_loop_label],
                ['mstore', ['mload', i_placeholder], 'pass'],
                ['mstore', i_placeholder, ['sub', ['mload', i_placeholder], 32]],
                ['if', ['ge', ['mload', i_placeholder], mem_from],
                    ['goto', pop_loop_label]]
            ]
        else:
            # Small region: unrolled word-by-word save/restore.
            push_local_vars = [['mload', pos] for pos in range(mem_from, mem_to, 32)]
            pop_local_vars = [
                ['mstore', pos, 'pass']
                for pos in range(mem_to - 32, mem_from - 32, -32)
            ]

    # Push Arguments
    if expr_args:
        inargs, inargsize, arg_pos = pack_arguments(
            sig,
            expr_args,
            context,
            return_placeholder=False,
            pos=getpos(stmt_expr),
        )
        push_args += [inargs]  # copy arguments first, to not mess up the push/pop sequencing.

        static_arg_size = 32 * sum(
            [get_static_size_of_type(arg.typ) for arg in expr_args])
        static_pos = int(arg_pos + static_arg_size)
        needs_dyn_section = any(
            [has_dynamic_data(arg.typ) for arg in expr_args])

        if needs_dyn_section:
            ident = 'push_args_%d_%d_%d' % (
                sig.method_id, stmt_expr.lineno, stmt_expr.col_offset)
            start_label = ident + '_start'
            end_label = ident + '_end'
            i_placeholder = context.new_placeholder(BaseType('uint256'))

            # Calculate copy start position.
            # Given | static | dynamic | section in memory,
            # copy backwards so the values are in order on the stack.
            # We calculate i, the end of the whole encoded part
            # (i.e. the starting index for copy)
            # by taking ceil32(len<arg>) + offset<arg> + arg_pos
            # for the last dynamic argument and arg_pos is the start
            # the whole argument section.
            for idx, arg in enumerate(expr_args):
                if isinstance(arg.typ, ByteArrayLike):
                    last_idx = idx
            push_args += [
                ['with', 'offset', ['mload', arg_pos + last_idx * 32],
                    ['with', 'len_pos', ['add', arg_pos, 'offset'],
                        ['with', 'len_value', ['mload', 'len_pos'],
                            ['mstore', i_placeholder,
                                ['add', 'len_pos', ['ceil32', 'len_value']]]]]]
            ]

            # loop from end of dynamic section to start of dynamic section,
            # pushing each element onto the stack.
            push_args += [
                ['label', start_label],
                ['if', ['lt', ['mload', i_placeholder], static_pos],
                    ['goto', end_label]],
                ['mload', ['mload', i_placeholder]],
                ['mstore', i_placeholder,
                    ['sub', ['mload', i_placeholder], 32]],  # decrease i
                ['goto', start_label],
                ['label', end_label]
            ]

        # push static section
        push_args += [
            ['mload', pos] for pos in reversed(range(arg_pos, static_pos, 32))
        ]

    # Jump to function label.
    jump_to_func = [
        ['add', ['pc'], 6],  # set callback pointer.
        ['goto', 'priv_{}'.format(sig.method_id)],
        ['jumpdest'],
    ]

    # Pop return values.
    returner = [0]
    if sig.output_type:
        output_placeholder, returner, output_size = call_make_placeholder(
            stmt_expr, context, sig)
        if output_size > 0:
            dynamic_offsets = []
            if isinstance(sig.output_type, (BaseType, ListType)):
                # Fixed-size output: pop each word into its memory slot.
                pop_return_values = [
                    ['mstore', ['add', output_placeholder, pos], 'pass']
                    for pos in range(0, output_size, 32)
                ]
            elif isinstance(sig.output_type, ByteArrayLike):
                # Single dynamic output: discard the offset word; the
                # unpacker below copies the payload.
                dynamic_offsets = [(0, sig.output_type)]
                pop_return_values = [
                    ['pop', 'pass'],
                ]
            elif isinstance(sig.output_type, TupleLike):
                static_offset = 0
                pop_return_values = []
                for out_type in sig.output_type.members:
                    if isinstance(out_type, ByteArrayLike):
                        pop_return_values.append(
                            ['mstore', ['add', output_placeholder, static_offset], 'pass']
                        )
                        dynamic_offsets.append(
                            (['mload', ['add', output_placeholder, static_offset]], out_type)
                        )
                    else:
                        pop_return_values.append(
                            ['mstore', ['add', output_placeholder, static_offset], 'pass']
                        )
                    static_offset += 32

            # append dynamic unpacker.
            dyn_idx = 0
            for in_memory_offset, _out_type in dynamic_offsets:
                ident = "%d_%d_arg_%d" % (
                    stmt_expr.lineno, stmt_expr.col_offset, dyn_idx)
                dyn_idx += 1
                start_label = 'dyn_unpack_start_' + ident
                end_label = 'dyn_unpack_end_' + ident
                i_placeholder = context.new_placeholder(typ=BaseType('uint256'))
                begin_pos = ['add', output_placeholder, in_memory_offset]
                # loop until length.
                o = LLLnode.from_list(
                    ['seq_unchecked',
                        ['mstore', begin_pos, 'pass'],  # get len
                        ['mstore', i_placeholder, 0],
                        ['label', start_label],
                        [  # break
                            'if',
                            ['ge', ['mload', i_placeholder],
                                ['ceil32', ['mload', begin_pos]]],
                            ['goto', end_label]
                        ],
                        [  # pop into correct memory slot.
                            'mstore',
                            ['add', ['add', begin_pos, 32], ['mload', i_placeholder]],
                            'pass',
                        ],
                        # increment i
                        ['mstore', i_placeholder,
                            ['add', 32, ['mload', i_placeholder]]],
                        ['goto', start_label],
                        ['label', end_label]],
                    typ=None, annotation='dynamic unpacker', pos=getpos(stmt_expr))
                pop_return_values.append(o)

    call_body = list(itertools.chain(
        ['seq_unchecked'],
        pre_init,
        push_local_vars,
        push_args,
        jump_to_func,
        pop_return_values,
        pop_local_vars,
        [returner],
    ))
    # If we have no return, we need to pop off
    pop_returner_call_body = ['pop', call_body] if sig.output_type is None else call_body

    o = LLLnode.from_list(
        pop_returner_call_body,
        typ=sig.output_type,
        location='memory',
        pos=getpos(stmt_expr),
        annotation='Internal Call: %s' % method_name,
        add_gas_estimate=sig.gas
    )
    o.gas += sig.gas
    return o
def parse_return(self):
    """Generate LLL for a `return` statement.

    Validates the returned expression against `self.context.return_type`,
    then dispatches on the expression's LLL type: BaseType, ByteArrayLike,
    ListType, StructType and TupleType each get their own encoding path.
    Anything else raises TypeMismatch.
    """
    # A function with no declared return type must not return a value,
    # and still emits an (empty) return statement.
    if self.context.return_type is None:
        if self.stmt.value:
            raise TypeMismatch("Not expecting to return a value", self.stmt)
        return LLLnode.from_list(
            make_return_stmt(self.stmt, self.context, 0, 0),
            typ=None,
            pos=getpos(self.stmt),
            valency=0,
        )
    if not self.stmt.value:
        raise TypeMismatch("Expecting to return a value", self.stmt)

    sub = Expr(self.stmt.value, self.context).lll_node

    # Returning a value (most common case)
    if isinstance(sub.typ, BaseType):
        sub = unwrap_location(sub)
        if not isinstance(self.context.return_type, BaseType):
            raise TypeMismatch(
                f"Return type units mismatch {sub.typ} {self.context.return_type}",
                self.stmt.value
            )
        elif self.context.return_type != sub.typ and not sub.typ.is_literal:
            raise TypeMismatch(
                f"Trying to return base type {sub.typ}, output expecting "
                f"{self.context.return_type}",
                self.stmt.value,
            )
        elif sub.typ.is_literal and (
                self.context.return_type.typ == sub.typ or
                'int' in self.context.return_type.typ and
                'int' in sub.typ.typ):  # noqa: E501
            # Literal of a compatible int flavor: range-check at compile time,
            # then store at memory slot 0 and return 32 bytes.
            if not SizeLimits.in_bounds(self.context.return_type.typ, sub.value):
                raise InvalidLiteral(
                    "Number out of range: " + str(sub.value), self.stmt
                )
            else:
                return LLLnode.from_list(
                    [
                        'seq',
                        ['mstore', 0, sub],
                        make_return_stmt(self.stmt, self.context, 0, 32)
                    ],
                    typ=None,
                    pos=getpos(self.stmt),
                    valency=0,
                )
        elif is_base_type(sub.typ, self.context.return_type.typ) or (
                is_base_type(sub.typ, 'int128') and
                is_base_type(self.context.return_type, 'int256')):  # noqa: E501
            # Exact base-type match (or the allowed int128 -> int256 widening).
            return LLLnode.from_list(
                ['seq', ['mstore', 0, sub], make_return_stmt(self.stmt, self.context, 0, 32)],
                typ=None,
                pos=getpos(self.stmt),
                valency=0,
            )
        else:
            raise TypeMismatch(
                f"Unsupported type conversion: {sub.typ} to {self.context.return_type}",
                self.stmt.value,
            )

    # Returning a byte array
    elif isinstance(sub.typ, ByteArrayLike):
        if not sub.typ.eq_base(self.context.return_type):
            raise TypeMismatch(
                f"Trying to return base type {sub.typ}, output expecting "
                f"{self.context.return_type}",
                self.stmt.value,
            )
        if sub.typ.maxlen > self.context.return_type.maxlen:
            raise TypeMismatch(
                f"Cannot cast from greater max-length {sub.typ.maxlen} to shorter "
                f"max-length {self.context.return_type.maxlen}",
                self.stmt.value,
            )

        # loop memory has to be allocated first.
        loop_memory_position = self.context.new_placeholder(typ=BaseType('uint256'))
        # len & bytez placeholder have to be declared after each other at all times.
        len_placeholder = self.context.new_placeholder(typ=BaseType('uint256'))
        bytez_placeholder = self.context.new_placeholder(typ=sub.typ)

        if sub.location in ('storage', 'memory'):
            # Copy into the placeholder, zero the tail, prefix with the
            # ABI offset word (32), then return len + data (ceil32-rounded).
            return LLLnode.from_list([
                'seq',
                make_byte_array_copier(
                    LLLnode(bytez_placeholder, location='memory', typ=sub.typ),
                    sub,
                    pos=getpos(self.stmt)
                ),
                zero_pad(bytez_placeholder),
                ['mstore', len_placeholder, 32],
                make_return_stmt(
                    self.stmt,
                    self.context,
                    len_placeholder,
                    ['ceil32', ['add', ['mload', bytez_placeholder], 64]],
                    loop_memory_position=loop_memory_position,
                )
            ], typ=None, pos=getpos(self.stmt), valency=0)
        else:
            raise Exception(f"Invalid location: {sub.location}")

    elif isinstance(sub.typ, ListType):
        loop_memory_position = self.context.new_placeholder(typ=BaseType('uint256'))
        if sub.typ != self.context.return_type:
            raise TypeMismatch(
                f"List return type {sub.typ} does not match specified "
                f"return type, expecting {self.context.return_type}",
                self.stmt
            )
        elif sub.location == "memory" and sub.value != "multi":
            # Already laid out contiguously in memory: return it in place.
            return LLLnode.from_list(
                make_return_stmt(
                    self.stmt,
                    self.context,
                    sub,
                    get_size_of_type(self.context.return_type) * 32,
                    loop_memory_position=loop_memory_position,
                ),
                typ=None,
                pos=getpos(self.stmt),
                valency=0,
            )
        else:
            # Materialize (e.g. a literal 'multi' node) into fresh memory first.
            new_sub = LLLnode.from_list(
                self.context.new_placeholder(self.context.return_type),
                typ=self.context.return_type,
                location='memory',
            )
            setter = make_setter(new_sub, sub, 'memory', pos=getpos(self.stmt))
            return LLLnode.from_list([
                'seq',
                setter,
                make_return_stmt(
                    self.stmt,
                    self.context,
                    new_sub,
                    get_size_of_type(self.context.return_type) * 32,
                    loop_memory_position=loop_memory_position,
                )
            ], typ=None, pos=getpos(self.stmt))

    # Returning a struct
    elif isinstance(sub.typ, StructType):
        retty = self.context.return_type
        if not isinstance(retty, StructType) or retty.name != sub.typ.name:
            raise TypeMismatch(
                f"Trying to return {sub.typ}, output expecting {self.context.return_type}",
                self.stmt.value,
            )
        return gen_tuple_return(self.stmt, self.context, sub)

    # Returning a tuple.
    elif isinstance(sub.typ, TupleType):
        if not isinstance(self.context.return_type, TupleType):
            raise TypeMismatch(
                f"Trying to return tuple type {sub.typ}, output expecting "
                f"{self.context.return_type}",
                self.stmt.value,
            )

        if len(self.context.return_type.members) != len(sub.typ.members):
            raise StructureException("Tuple lengths don't match!", self.stmt)

        # check return type matches, sub type.
        for i, ret_x in enumerate(self.context.return_type.members):
            s_member = sub.typ.members[i]
            # Members may be raw NodeTypes or wrappers carrying a .typ.
            sub_type = s_member if isinstance(s_member, NodeType) else s_member.typ
            if type(sub_type) is not type(ret_x):
                raise StructureException(
                    "Tuple return type does not match annotated return. "
                    f"{type(sub_type)} != {type(ret_x)}",
                    self.stmt
                )
        return gen_tuple_return(self.stmt, self.context, sub)
    else:
        raise TypeMismatch(f"Can't return type {sub.typ}", self.stmt)
def parse_name(self):
    """Handle a bare name used as a statement; only the `vdb` hook is legal."""
    if self.stmt.id != "vdb":
        raise StructureException(f"Unsupported statement type: {type(self.stmt)}", self.stmt)
    # `vdb` lowers to the LLL `debugger` instruction.
    return LLLnode('debugger', typ=None, pos=getpos(self.stmt))
def _RLPlist(expr, args, kwargs, context):
    """Generate LLL for the builtin RLP-list decoder.

    Calls the RLP decoder precompile/contract at RLP_DECODER_ADDRESS on the
    input byte array (`args[0]`) and unpacks up to 32 items into a tuple
    whose element types are given by the type list in `args[1]`.
    """
    # Second argument must be a list of types
    if not isinstance(args[1], ast.List):
        raise TypeMismatchException("Expecting list of types for second argument", args[1])

    if len(args[1].elts) == 0:
        raise TypeMismatchException("RLP list must have at least one item", expr)

    if len(args[1].elts) > 32:
        raise TypeMismatchException("RLP list must have at most 32 items", expr)

    # Get the output format
    _format = []
    for arg in args[1].elts:
        if isinstance(arg, ast.Name) and arg.id == "bytes":
            # Bare `bytes` inherits the max length of the input array.
            subtyp = ByteArrayType(args[0].typ.maxlen)
        else:
            subtyp = parse_type(arg, 'memory')
            if not isinstance(subtyp, BaseType):
                raise TypeMismatchException(
                    "RLP lists only accept BaseTypes and byte arrays", arg)
            if not is_base_type(subtyp, ('int128', 'uint256', 'bytes32', 'address', 'bool')):
                raise TypeMismatchException(
                    "Unsupported base type: %s" % subtyp.typ, arg)
        _format.append(subtyp)

    output_type = TupleType(_format)
    # Decoder output layout: one offset word per item, a count word, plus
    # room for the decoded values themselves.
    output_placeholder_type = ByteArrayType(
        (2 * len(_format) + 1 + get_size_of_type(output_type)) * 32)
    output_placeholder = context.new_placeholder(output_placeholder_type)
    output_node = LLLnode.from_list(
        output_placeholder, typ=output_placeholder_type, location='memory')

    # Create a decoder for each element in the tuple
    decoder = []
    for i, typ in enumerate(_format):
        # Decoder for bytes32
        if is_base_type(typ, 'bytes32'):
            # Item must be exactly 32 bytes long; then load the payload.
            decoder.append(
                LLLnode.from_list(
                    [
                        'seq',
                        [
                            'assert',
                            [
                                'eq',
                                [
                                    'mload',
                                    [
                                        'add', output_node,
                                        ['mload', ['add', output_node, 32 * i]]
                                    ]
                                ],
                                32
                            ]
                        ],
                        [
                            'mload',
                            [
                                'add', 32,
                                [
                                    'add', output_node,
                                    ['mload', ['add', output_node, 32 * i]]
                                ]
                            ]
                        ]
                    ],
                    typ,
                    annotation='getting and checking bytes32 item'))
        # Decoder for address
        elif is_base_type(typ, 'address'):
            # Item must be exactly 20 bytes; mod by 2**160 to strip any
            # high-order bytes after the shifted load.
            decoder.append(
                LLLnode.from_list(
                    [
                        'seq',
                        [
                            'assert',
                            [
                                'eq',
                                [
                                    'mload',
                                    [
                                        'add', output_node,
                                        ['mload', ['add', output_node, 32 * i]]
                                    ]
                                ],
                                20
                            ]
                        ],
                        [
                            'mod',
                            [
                                'mload',
                                [
                                    'add', 20,
                                    [
                                        'add', output_node,
                                        ['mload', ['add', output_node, 32 * i]]
                                    ]
                                ]
                            ],
                            ['mload', MemoryPositions.ADDRSIZE]
                        ]
                    ],
                    typ,
                    annotation='getting and checking address item'))
        # Decoder for bytes
        elif isinstance(typ, ByteArrayType):
            # Byte arrays are returned by reference into the output buffer.
            decoder.append(
                LLLnode.from_list(
                    [
                        'add', output_node,
                        ['mload', ['add', output_node, 32 * i]]
                    ],
                    typ,
                    location='memory',
                    annotation='getting byte array'))
        # Decoder for num and uint256
        elif is_base_type(typ, ('int128', 'uint256')):
            bytez = LLLnode.from_list(
                [
                    'add', output_node,
                    ['mload', ['add', output_node, 32 * i]]
                ],
                typ,
                location='memory',
                annotation='getting and checking %s' % typ.typ)
            decoder.append(byte_array_to_num(bytez, expr, typ.typ))
        # Decoder for bools
        elif is_base_type(typ, ('bool')):
            # This is basically a really clever way to test for a length-prefixed one or zero. We take the 32 bytes
            # starting one byte *after* the start of the length declaration; this includes the last 31 bytes of the
            # length and the first byte of the value. 0 corresponds to length 0, first byte 0, and 257 corresponds
            # to length 1, first byte \x01
            decoder.append(
                LLLnode.from_list(
                    [
                        'with', '_ans',
                        [
                            'mload',
                            [
                                'add', 1,
                                [
                                    'add', output_node,
                                    ['mload', ['add', output_node, 32 * i]]
                                ]
                            ]
                        ],
                        [
                            'seq',
                            [
                                'assert',
                                ['or', ['eq', '_ans', 0], ['eq', '_ans', 257]]
                            ],
                            ['div', '_ans', 257]
                        ]
                    ],
                    typ,
                    annotation='getting and checking bool'))
        else:
            # Should never reach because of top level base level check.
            raise Exception("Type not yet supported")  # pragma: no cover

    # Copy the input data to memory
    if args[0].location == "memory":
        variable_pointer = args[0]
    elif args[0].location == "storage":
        # Storage input: copy to a fresh memory placeholder first.
        placeholder = context.new_placeholder(args[0].typ)
        placeholder_node = LLLnode.from_list(
            placeholder, typ=args[0].typ, location='memory')
        copier = make_byte_array_copier(
            placeholder_node,
            LLLnode.from_list('_ptr', typ=args[0].typ, location=args[0].location))
        variable_pointer = [
            'with', '_ptr', args[0], ['seq', copier, placeholder_node]
        ]
    else:
        # Should never reach because of top level base level check.
        raise Exception("Location not yet supported")  # pragma: no cover

    # Decode the input data
    initial_setter = LLLnode.from_list(
        [
            'seq',
            [
                'with', '_sub', variable_pointer,
                [
                    'pop',
                    [
                        'call',
                        # Gas budget scales with the number of decoded items.
                        1500 + 400 * len(_format) + 10 * len(args),
                        LLLnode.from_list(RLP_DECODER_ADDRESS, annotation='RLP decoder'),
                        0,
                        ['add', '_sub', 32],
                        ['mload', '_sub'],
                        output_node,
                        64 * len(_format) + 32 + 32 * get_size_of_type(output_type)
                    ]
                ]
            ],
            # Sanity-check the decoder's reported item-table size.
            ['assert', ['eq', ['mload', output_node], 32 * len(_format) + 32]]
        ],
        typ=None)

    # Shove the input data decoder in front of the first variable decoder
    decoder[0] = LLLnode.from_list(
        ['seq', initial_setter, decoder[0]],
        typ=decoder[0].typ,
        location=decoder[0].location)

    return LLLnode.from_list(
        ["multi"] + decoder, typ=output_type, location='memory', pos=getpos(expr))
def as_unitless_number(expr, args, kwargs, context):
    """Strip the unit from a numeric value, keeping its base type and LLL payload."""
    source = args[0]
    # Same value/args, but the type loses its unit dict (replaced with {}).
    unitless_typ = BaseType(source.typ.typ, {})
    return LLLnode(value=source.value, args=source.args, typ=unitless_typ, pos=getpos(expr))
def concat(expr, context):
    """Lower the `concat(...)` builtin to LLL.

    Accepts two or more byte-array or bytes32 arguments, allocates one
    output buffer sized for the worst case, and emits code that copies each
    argument in order while threading the running write offset through the
    LLL variable `_poz`.
    """
    args = [Expr(arg, context).lll_node for arg in expr.args]
    if len(args) < 2:
        raise StructureException("Concat expects at least two arguments", expr)
    # Validate argument types: only byte arrays and bytes32 may be concatenated.
    for expr_arg, arg in zip(expr.args, args):
        if not isinstance(arg.typ, ByteArrayType) and not is_base_type(
                arg.typ, 'bytes32'):
            raise TypeMismatchException(
                "Concat expects byte arrays or bytes32 objects", expr_arg)
    # Maximum length of the output (bytes32 args always contribute 32 bytes)
    total_maxlen = sum([
        arg.typ.maxlen if isinstance(arg.typ, ByteArrayType) else 32
        for arg in args
    ])
    # Node representing the position of the output in memory
    placeholder = context.new_placeholder(ByteArrayType(total_maxlen))
    # Object representing the output
    seq = []
    # For each argument we are concatenating...
    for arg in args:
        # Start pasting into a position that starts at zero, and keeps
        # incrementing as we concatenate arguments. `_poz` is the running
        # byte offset into the output buffer.
        placeholder_node = LLLnode.from_list(['add', placeholder, '_poz'],
                                             typ=ByteArrayType(total_maxlen),
                                             location='memory')
        # Skip past the 32-byte length word of the output when writing data.
        placeholder_node_plus_32 = LLLnode.from_list(
            ['add', ['add', placeholder, '_poz'], 32],
            typ=ByteArrayType(total_maxlen),
            location='memory')
        if isinstance(arg.typ, ByteArrayType):
            # Ignore empty strings
            if arg.typ.maxlen == 0:
                continue
            # Get the length of the current argument.
            # NOTE(review): if arg.location is neither "memory" nor "storage",
            # `length`/`argstart` would be unbound here — presumably unreachable
            # for byte arrays at this point; confirm against callers.
            if arg.location == "memory":
                length = LLLnode.from_list(['mload', '_arg'],
                                           typ=BaseType('int128'))
                argstart = LLLnode.from_list(['add', '_arg', 32],
                                             typ=arg.typ,
                                             location=arg.location)
            elif arg.location == "storage":
                length = LLLnode.from_list(['sload', ['sha3_32', '_arg']],
                                           typ=BaseType('int128'))
                argstart = LLLnode.from_list(['add', ['sha3_32', '_arg'], 1],
                                             typ=arg.typ,
                                             location=arg.location)
            # Make a copier to copy over data from that argument
            seq.append([
                'with', '_arg', arg,
                [
                    'seq',
                    make_byte_slice_copier(placeholder_node_plus_32,
                                           argstart,
                                           length,
                                           arg.typ.maxlen,
                                           pos=getpos(expr)),
                    # Change the position to start at the correct
                    # place to paste the next value
                    ['set', '_poz', ['add', '_poz', length]]
                ]
            ])
        else:
            # bytes32 argument: store the 32-byte word directly and advance.
            seq.append([
                'seq',
                [
                    'mstore', ['add', placeholder_node, 32],
                    unwrap_location(arg)
                ],
                ['set', '_poz', ['add', '_poz', 32]]
            ])
    # The position, after all arguments are processed, equals the total
    # length. Paste this in to make the output a proper bytearray.
    seq.append(['mstore', placeholder, '_poz'])
    # Memory location of the output
    seq.append(placeholder)
    return LLLnode.from_list(['with', '_poz', 0, ['seq'] + seq],
                             typ=ByteArrayType(total_maxlen),
                             location='memory',
                             pos=getpos(expr),
                             annotation='concat')
def extract32(expr, args, kwargs, context):
    """Lower the `extract32(b, index, type=...)` builtin to LLL.

    Reads 32 bytes from a byte array `sub` at byte offset `index` and
    reinterprets them as the requested return type, clamping the offset so
    the read stays inside the array's data.
    """
    sub, index = args
    ret_type = kwargs['type']
    # Get length and specific element: build location-specific accessors for
    # the array's length word and its i-th 32-byte data word.
    if sub.location == "memory":
        lengetter = LLLnode.from_list(['mload', '_sub'],
                                      typ=BaseType('int128'))
        elementgetter = lambda index: LLLnode.from_list(
            ['mload', ['add', '_sub', ['add', 32, ['mul', 32, index]]]],
            typ=BaseType('int128'))
    elif sub.location == "storage":
        lengetter = LLLnode.from_list(['sload', ['sha3_32', '_sub']],
                                      typ=BaseType('int128'))
        elementgetter = lambda index: LLLnode.from_list(
            ['sload', ['add', ['sha3_32', '_sub'], ['add', 1, index]]],
            typ=BaseType('int128'))
    # Special case: index known to be a multiple of 32 — the word can be
    # loaded directly, no cross-word reassembly needed.
    if isinstance(index.value, int) and not index.value % 32:
        o = LLLnode.from_list([
            'with', '_sub', sub,
            elementgetter(
                ['div', ['clamp', 0, index, ['sub', lengetter, 32]], 32])
        ],
                              typ=BaseType(ret_type),
                              annotation='extracting 32 bytes')
    # General case: the 32 bytes may straddle two words, so combine the high
    # part of word `_di32` with the low part of word `_di32 + 1` when the
    # in-word offset `_mi32` is nonzero.
    else:
        o = LLLnode.from_list([
            'with', '_sub', sub,
            [
                'with', '_len', lengetter,
                [
                    'with', '_index', ['clamp', 0, index, ['sub', '_len', 32]],
                    [
                        'with', '_mi32', ['mod', '_index', 32],
                        [
                            'with', '_di32', ['div', '_index', 32],
                            [
                                'if',
                                '_mi32',
                                [
                                    'add',
                                    [
                                        'mul',
                                        elementgetter('_di32'),
                                        ['exp', 256, '_mi32']
                                    ],
                                    [
                                        'div',
                                        elementgetter(['add', '_di32', 1]),
                                        ['exp', 256, ['sub', 32, '_mi32']]
                                    ]
                                ],
                                elementgetter('_di32')
                            ]
                        ]
                    ]
                ]
            ]
        ],
                              typ=BaseType(ret_type),
                              pos=getpos(expr),
                              annotation='extracting 32 bytes')
    # Post-process per requested return type: clamp int128 into range,
    # bounds-check addresses, pass anything else through unchanged.
    if ret_type == 'int128':
        return LLLnode.from_list([
            'clamp', ['mload', MemoryPositions.MINNUM], o,
            ['mload', MemoryPositions.MAXNUM]
        ],
                                 typ=BaseType('int128'),
                                 pos=getpos(expr))
    elif ret_type == 'address':
        return LLLnode.from_list(
            ['uclamplt', o, ['mload', MemoryPositions.ADDRSIZE]],
            typ=BaseType(ret_type),
            pos=getpos(expr))
    else:
        return o
def parse_internal_function(code: ast.FunctionDef, sig: FunctionSignature,
                            context: Context) -> LLLnode:
    """
    Parse a internal function (FuncDef), and produce full function body.

    :param sig: the FuntionSignature
    :param code: ast of function
    :return: full sig compare & function body
    """
    validate_internal_function(code, sig)

    # Get nonreentrant lock
    nonreentrant_pre, nonreentrant_post = get_nonreentrant_lock(
        sig, context.global_ctx)

    # Create callback_ptr, this stores a destination in the bytecode for a internal
    # function to jump to after a function has executed.
    clampers: List[LLLnode] = []

    # Allocate variable space.
    context.memory_allocator.increase_memory(sig.max_copy_size)

    _post_callback_ptr = f"{sig.name}_{sig.method_id}_post_callback_ptr"
    context.callback_ptr = context.new_placeholder(typ=BaseType("uint256"))
    # The caller pushes the return destination on the stack; pop it into memory.
    clampers.append(
        LLLnode.from_list(
            ["mstore", context.callback_ptr, "pass"],
            annotation="pop callback pointer",
        ))
    if sig.total_default_args > 0:
        clampers.append(LLLnode.from_list(["label", _post_callback_ptr]))

    # internal functions without return types need to jump back to
    # the calling function, as there is no return statement to handle the
    # jump.
    if sig.output_type is None:
        stop_func = [["jump", ["mload", context.callback_ptr]]]
    else:
        stop_func = [["stop"]]

    # Generate copiers: move the caller-pushed arguments into reserved memory.
    if len(sig.base_args) == 0:
        copier = ["pass"]
        clampers.append(LLLnode.from_list(copier))
    elif sig.total_default_args == 0:
        copier = get_internal_arg_copier(
            total_size=sig.base_copy_size,
            memory_dest=MemoryPositions.RESERVED_MEMORY)
        clampers.append(LLLnode.from_list(copier))

    # Fill variable positions
    for arg in sig.args:
        if isinstance(arg.typ, ByteArrayLike):
            mem_pos, _ = context.memory_allocator.increase_memory(
                32 * get_size_of_type(arg.typ))
            context.vars[arg.name] = VariableRecord(arg.name, mem_pos,
                                                    arg.typ, False)
        else:
            context.vars[arg.name] = VariableRecord(
                arg.name,
                MemoryPositions.RESERVED_MEMORY + arg.pos,
                arg.typ,
                False,
            )

    # internal function copiers. No clamping for internal functions.
    dyn_variable_names = [
        a.name for a in sig.base_args if isinstance(a.typ, ByteArrayLike)
    ]
    if dyn_variable_names:
        i_placeholder = context.new_placeholder(typ=BaseType("uint256"))
        unpackers: List[Any] = []
        for idx, var_name in enumerate(dyn_variable_names):
            var = context.vars[var_name]
            ident = f"_load_args_{sig.method_id}_dynarg{idx}"
            o = make_unpacker(ident=ident,
                              i_placeholder=i_placeholder,
                              begin_pos=var.pos)
            unpackers.append(o)

        if not unpackers:
            unpackers = ["pass"]

        # 0 added to complete full overarching 'seq' statement, see internal_label.
        unpackers.append(0)
        clampers.append(
            LLLnode.from_list(
                ["seq_unchecked"] + unpackers,
                typ=None,
                annotation="dynamic unpacker",
                pos=getpos(code),
            ))

    # Function has default arguments.
    if sig.total_default_args > 0:  # Function with default parameters.

        default_sigs = sig_utils.generate_default_arg_sigs(
            code, context.sigs, context.global_ctx)
        sig_chain: List[Any] = ["seq"]

        # Build one entry point per default-argument signature.
        for default_sig in default_sigs:
            sig_compare, internal_label = get_sig_statements(
                default_sig, getpos(code))

            # Populate unset default variables
            set_defaults = []
            for arg_name in get_default_names_to_set(sig, default_sig):
                value = Expr(sig.default_values[arg_name], context).lll_node
                var = context.vars[arg_name]
                left = LLLnode.from_list(var.pos,
                                         typ=var.typ,
                                         location="memory",
                                         pos=getpos(code),
                                         mutable=var.mutable)
                set_defaults.append(
                    make_setter(left, value, "memory", pos=getpos(code)))

            current_sig_arg_names = [x.name for x in default_sig.args]

            # Load all variables in default section, if internal,
            # because the stack is a linear pipe.
            copier_arg_count = len(default_sig.args)
            copier_arg_names = current_sig_arg_names

            # Order copier_arg_names, this is very important.
            copier_arg_names = [
                x.name for x in default_sig.args if x.name in copier_arg_names
            ]

            # Variables to be populated from calldata/stack.
            default_copiers: List[Any] = []
            if copier_arg_count > 0:
                # Get map of variables in calldata, with their offsets
                offset = 4
                calldata_offset_map = {}
                for arg in default_sig.args:
                    calldata_offset_map[arg.name] = offset
                    offset += (32 if isinstance(arg.typ, ByteArrayLike) else
                               get_size_of_type(arg.typ) * 32)

                # Copy set default parameters from calldata
                dynamics = []
                for arg_name in copier_arg_names:
                    var = context.vars[arg_name]
                    if isinstance(var.typ, ByteArrayLike):
                        _size = 32
                        dynamics.append(var.pos)
                    else:
                        _size = var.size * 32
                    default_copiers.append(
                        get_internal_arg_copier(
                            memory_dest=var.pos,
                            total_size=_size,
                        ))

                # Unpack byte array if necessary.
                if dynamics:
                    i_placeholder = context.new_placeholder(
                        typ=BaseType("uint256"))
                    for idx, var_pos in enumerate(dynamics):
                        ident = f"unpack_default_sig_dyn_{default_sig.method_id}_arg{idx}"
                        default_copiers.append(
                            make_unpacker(
                                ident=ident,
                                i_placeholder=i_placeholder,
                                begin_pos=var_pos,
                            ))
                default_copiers.append(0)  # for over arching seq, POP

            sig_chain.append([
                "if",
                sig_compare,
                [
                    "seq",
                    internal_label,
                    LLLnode.from_list(
                        ["mstore", context.callback_ptr, "pass"],
                        annotation="pop callback pointer",
                        pos=getpos(code),
                    ),
                    ["seq"] + set_defaults if set_defaults else ["pass"],
                    ["seq_unchecked"] +
                    default_copiers if default_copiers else ["pass"],
                    ["goto", _post_callback_ptr],
                ],
            ])

        # With internal functions all variable loading occurs in the default
        # function sub routine.
        _clampers = [["label", _post_callback_ptr]]

        # Function with default parameters.
        o = LLLnode.from_list(
            [
                "seq",
                sig_chain,
                [
                    "if",
                    0,  # can only be jumped into
                    [
                        "seq",
                        ["seq"] + nonreentrant_pre + _clampers +
                        [parse_body(c, context)
                         for c in code.body] + nonreentrant_post + stop_func,
                    ],
                ],
            ],
            typ=None,
            pos=getpos(code),
        )
    else:
        # Function without default parameters.
        sig_compare, internal_label = get_sig_statements(sig, getpos(code))
        o = LLLnode.from_list(
            [
                "if",
                sig_compare,
                ["seq"] + [internal_label] + nonreentrant_pre + clampers +
                [parse_body(c, context)
                 for c in code.body] + nonreentrant_post + stop_func,
            ],
            typ=None,
            pos=getpos(code),
        )
        # NOTE(review): this inner `return o` makes the function-level
        # `return o` below redundant for this branch — consider consolidating.
        return o
    return o
def pack_logging_data(arg_nodes, arg_types, context, pos):
    """Pack the non-indexed (data) arguments of an event log into memory.

    Returns a 4-tuple of (holder LLL sequence, maximum packed byte length,
    dynamic-offset counter variable or None, start of the packed data region).
    """
    # Checks to see if there's any data
    if not arg_nodes:
        return ["seq"], 0, None, 0

    holder = ["seq"]
    maxlen = len(arg_nodes) * 32  # total size of all packed args (upper limit)

    # Unroll any function calls, to temp variables.
    prealloacted = {}
    for idx, node in enumerate(arg_nodes):
        if isinstance(node, (vy_ast.Str, vy_ast.Call)) and node.get("func.id") != "empty":
            expr = Expr(node, context)
            source_lll = expr.lll_node
            tmp_variable = context.new_internal_variable(source_lll.typ)
            tmp_variable_node = LLLnode.from_list(
                tmp_variable,
                typ=source_lll.typ,
                pos=getpos(node),
                location="memory",
                annotation=f"log_prealloacted {source_lll.typ}",
            )
            # Copy bytes.
            holder.append(
                make_setter(tmp_variable_node, source_lll, pos=getpos(node), location="memory")
            )
            prealloacted[idx] = tmp_variable_node

    # Create internal variables for for dynamic and static args.
    # Dynamic args occupy a 32-byte offset slot in the static section.
    static_types = []
    for typ in arg_types:
        static_types.append(typ if not typ.is_dynamic_size else Uint256Definition())

    requires_dynamic_offset = any(typ.is_dynamic_size for typ in arg_types)

    dynamic_offset_counter = None
    if requires_dynamic_offset:
        # TODO refactor out old type objects
        dynamic_offset_counter = context.new_internal_variable(BaseType(32))
        dynamic_placeholder = context.new_internal_variable(BaseType(32))

    static_vars = [context.new_internal_variable(i) for i in static_types]

    # Populate static placeholders.
    for i, (node, typ) in enumerate(zip(arg_nodes, arg_types)):
        placeholder = static_vars[i]
        if not isinstance(typ, ArrayValueAbstractType):
            holder, maxlen = pack_args_by_32(
                holder, maxlen, prealloacted.get(i, node), typ, context, placeholder, pos=pos,
            )

    # Dynamic position starts right after the static args.
    if requires_dynamic_offset:
        holder.append(LLLnode.from_list(["mstore", dynamic_offset_counter, maxlen]))

    # Calculate maximum dynamic offset placeholders, used for gas estimation.
    for typ in arg_types:
        if typ.is_dynamic_size:
            maxlen += typ.size_in_bytes

    if requires_dynamic_offset:
        datamem_start = dynamic_placeholder + 32
    else:
        datamem_start = static_vars[0]

    # Copy necessary data into allocated dynamic section.
    for i, (node, typ) in enumerate(zip(arg_nodes, arg_types)):
        if isinstance(typ, ArrayValueAbstractType):
            if isinstance(node, vy_ast.Call) and node.func.get("id") == "empty":
                # TODO add support for this
                raise StructureException(
                    "Cannot use `empty` on Bytes or String types within an event log", node
                )
            pack_args_by_32(
                holder=holder,
                maxlen=maxlen,
                arg=prealloacted.get(i, node),
                typ=typ,
                context=context,
                placeholder=static_vars[i],
                datamem_start=datamem_start,
                dynamic_offset_counter=dynamic_offset_counter,
                pos=pos,
            )

    return holder, maxlen, dynamic_offset_counter, datamem_start
def parse_external_function(
    code: ast.FunctionDef, sig: FunctionSignature, context: Context
) -> LLLnode:
    """
    Parse a external function (FuncDef), and produce full function body.

    :param sig: the FuntionSignature
    :param code: ast of function
    :return: full sig compare & function body
    """
    validate_external_function(code, sig, context.global_ctx)

    # Get nonreentrant lock
    nonreentrant_pre, nonreentrant_post = get_nonreentrant_lock(sig, context.global_ctx)

    clampers = []

    # Generate copiers
    copier: List[Any] = ["pass"]
    if not len(sig.base_args):
        copier = ["pass"]
    elif sig.name == "__init__":
        # Constructor arguments live in the deploy bytecode after the code,
        # so they are copied via CODECOPY rather than from calldata.
        copier = ["codecopy", MemoryPositions.RESERVED_MEMORY, "~codelen", sig.base_copy_size]
        context.memory_allocator.increase_memory(sig.max_copy_size)
    clampers.append(copier)

    # Add asserts for payable and internal
    if sig.mutability != "payable":
        clampers.append(["assert", ["iszero", "callvalue"]])

    # Fill variable positions
    default_args_start_pos = len(sig.base_args)
    for i, arg in enumerate(sig.args):
        if i < len(sig.base_args):
            clampers.append(
                make_arg_clamper(
                    arg.pos,
                    context.memory_allocator.get_next_memory_position(),
                    arg.typ,
                    sig.name == "__init__",
                )
            )
        if isinstance(arg.typ, ByteArrayLike):
            mem_pos, _ = context.memory_allocator.increase_memory(32 * get_size_of_type(arg.typ))
            context.vars[arg.name] = VariableRecord(arg.name, mem_pos, arg.typ, False)
        else:
            if sig.name == "__init__":
                context.vars[arg.name] = VariableRecord(
                    arg.name, MemoryPositions.RESERVED_MEMORY + arg.pos, arg.typ, False,
                )
            elif i >= default_args_start_pos:
                # default args need to be allocated in memory.
                type_size = get_size_of_type(arg.typ) * 32
                default_arg_pos, _ = context.memory_allocator.increase_memory(type_size)
                context.vars[arg.name] = VariableRecord(
                    name=arg.name, pos=default_arg_pos, typ=arg.typ, mutable=False,
                )
            else:
                # Base args are read directly from calldata.
                context.vars[arg.name] = VariableRecord(
                    name=arg.name, pos=4 + arg.pos, typ=arg.typ, mutable=False, location="calldata"
                )

    # Create "clampers" (input well-formedness checkers)
    # Return function body
    if sig.name == "__init__":
        o = LLLnode.from_list(
            ["seq"] + clampers + [parse_body(code.body, context)],  # type: ignore
            pos=getpos(code),
        )
    # Is default function.
    elif sig.is_default_func():
        if len(sig.args) > 0:
            raise FunctionDeclarationException(
                "Default function may not receive any arguments.", code
            )
        o = LLLnode.from_list(
            ["seq"] + clampers + [parse_body(code.body, context)],  # type: ignore
            pos=getpos(code),
        )
    # Is a normal function.
    else:
        # Function with default parameters.
        if sig.total_default_args > 0:
            function_routine = f"{sig.name}_{sig.method_id}"
            default_sigs = sig_utils.generate_default_arg_sigs(
                code, context.sigs, context.global_ctx
            )
            sig_chain: List[Any] = ["seq"]

            # One dispatch entry per generated default-argument signature.
            for default_sig in default_sigs:
                sig_compare, _ = get_sig_statements(default_sig, getpos(code))

                # Populate unset default variables
                set_defaults = []
                for arg_name in get_default_names_to_set(sig, default_sig):
                    value = Expr(sig.default_values[arg_name], context).lll_node
                    var = context.vars[arg_name]
                    left = LLLnode.from_list(
                        var.pos,
                        typ=var.typ,
                        location="memory",
                        pos=getpos(code),
                        mutable=var.mutable,
                    )
                    set_defaults.append(make_setter(left, value, "memory", pos=getpos(code)))

                current_sig_arg_names = {x.name for x in default_sig.args}
                base_arg_names = {arg.name for arg in sig.base_args}
                copier_arg_count = len(default_sig.args) - len(sig.base_args)
                copier_arg_names = list(current_sig_arg_names - base_arg_names)

                # Order copier_arg_names, this is very important.
                copier_arg_names = [x.name for x in default_sig.args if x.name in copier_arg_names]

                # Variables to be populated from calldata/stack.
                default_copiers: List[Any] = []
                if copier_arg_count > 0:
                    # Get map of variables in calldata, with their offsets
                    offset = 4
                    calldata_offset_map = {}
                    for arg in default_sig.args:
                        calldata_offset_map[arg.name] = offset
                        offset += (
                            32 if isinstance(arg.typ, ByteArrayLike) else get_size_of_type(arg.typ) * 32
                        )

                    # Copy default parameters from calldata.
                    for arg_name in copier_arg_names:
                        var = context.vars[arg_name]
                        calldata_offset = calldata_offset_map[arg_name]

                        # Add clampers.
                        default_copiers.append(
                            make_arg_clamper(calldata_offset - 4, var.pos, var.typ,)
                        )
                        # Add copying code.
                        _offset: Union[int, List[Any]] = calldata_offset
                        if isinstance(var.typ, ByteArrayLike):
                            # Dynamic types store a pointer at the head slot.
                            _offset = ["add", 4, ["calldataload", calldata_offset]]
                        default_copiers.append(
                            get_external_arg_copier(
                                memory_dest=var.pos,
                                total_size=var.size * 32,
                                offset=_offset,
                            )
                        )

                    default_copiers.append(0)  # for over arching seq, POP

                sig_chain.append(
                    [
                        "if",
                        sig_compare,
                        [
                            "seq",
                            ["seq"] + set_defaults if set_defaults else ["pass"],
                            ["seq_unchecked"] + default_copiers if default_copiers else ["pass"],
                            ["goto", function_routine],
                        ],
                    ]
                )

            # Function with default parameters.
            o = LLLnode.from_list(
                [
                    "seq",
                    sig_chain,
                    [
                        "if",
                        0,  # can only be jumped into
                        [
                            "seq",
                            ["label", function_routine],
                            ["seq"] + nonreentrant_pre + clampers
                            + [parse_body(c, context) for c in code.body]
                            + nonreentrant_post
                            + [["stop"]],
                        ],
                    ],
                ],
                typ=None,
                pos=getpos(code),
            )
        else:
            # Function without default parameters.
            sig_compare, _ = get_sig_statements(sig, getpos(code))
            o = LLLnode.from_list(
                [
                    "if",
                    sig_compare,
                    ["seq"] + nonreentrant_pre + clampers
                    + [parse_body(c, context) for c in code.body]
                    + nonreentrant_post
                    + [["stop"]],
                ],
                typ=None,
                pos=getpos(code),
            )
    return o
def parse_continue(self):
    """Lower a `continue` statement to the bare LLL `continue` opcode."""
    node = LLLnode.from_list(
        'continue',
        typ=None,
        pos=getpos(self.stmt),
    )
    return node
def bitwise_not(expr, args, kwargs, context):
    """Lower `bitwise_not(x)` to the EVM NOT opcode on a uint256 operand."""
    operand = args[0]
    return LLLnode.from_list(
        ['not', operand],
        typ=BaseType('uint256'),
        pos=getpos(expr),
    )
def parse_break(self):
    """Lower a `break` statement to the bare LLL `break` opcode."""
    node = LLLnode.from_list(
        'break',
        typ=None,
        pos=getpos(self.stmt),
    )
    return node
def ann_assign(self):
    """Lower an annotated assignment (`x: type = value`): declare a new
    local variable of the annotated type and initialize it from the value
    expression."""
    with self.context.assignment_scope():
        typ = parse_type(
            self.stmt.annotation,
            location='memory',
            custom_units=self.context.custom_units,
            custom_structs=self.context.structs,
            constants=self.context.constants,
        )
        # Annotated declarations are only allowed on plain names, not fields.
        if isinstance(self.stmt.target, vy_ast.Attribute):
            raise TypeMismatch(
                f'May not set type for field {self.stmt.target.attr}',
                self.stmt,
            )
        varname = self.stmt.target.id
        # Reserve memory for the new local variable.
        pos = self.context.new_variable(varname, typ)
        if self.stmt.value is None:
            raise StructureException(
                'New variables must be initialized explicitly', self.stmt)

        sub = Expr(self.stmt.value, self.context).lll_node

        # Disallow assignment to None
        if isinstance(sub.typ, NullType):
            raise InvalidLiteral(
                (
                    'Assignment to None is not allowed, use a default '
                    'value or built-in `clear()`.'
                ),
                self.stmt
            )

        is_literal_bytes32_assign = (
            isinstance(sub.typ, ByteArrayType)
            and sub.typ.maxlen == 32
            and isinstance(typ, BaseType)
            and typ.typ == 'bytes32'
            and sub.typ.is_literal
        )

        # If bytes[32] to bytes32 assignment rewrite sub as bytes32.
        if is_literal_bytes32_assign:
            sub = LLLnode(
                bytes_to_int(self.stmt.value.s),
                typ=BaseType('bytes32'),
                pos=getpos(self.stmt),
            )

        self._check_valid_assign(sub)
        self._check_same_variable_assign(sub)
        variable_loc = LLLnode.from_list(
            pos,
            typ=typ,
            location='memory',
            pos=getpos(self.stmt),
        )
        o = make_setter(variable_loc, sub, 'memory', pos=getpos(self.stmt))
        # o.pos = getpos(self.stmt) # TODO: Should this be here like in assign()?
        return o
def parse_pass(self):
    """Lower a `pass` statement to the LLL no-op."""
    node = LLLnode.from_list(
        'pass',
        typ=None,
        pos=getpos(self.stmt),
    )
    return node
def assign(self):
    """Lower an assignment statement (e.g. `x[4] = y`), handling the
    special RLPList-declaration form, validating the target, and emitting a
    setter from the value expression into the target location."""
    # Assignment (e.g. x[4] = y)
    if len(self.stmt.targets) != 1:
        raise StructureException("Assignment statement must have one target", self.stmt)
    with self.context.assignment_scope():
        sub = Expr(self.stmt.value, self.context).lll_node

        # Disallow assignment to None
        if isinstance(sub.typ, NullType):
            raise InvalidLiteral(
                (
                    'Assignment to None is not allowed, use a default value '
                    'or built-in `clear()`.'
                ),
                self.stmt,
            )

        is_valid_rlp_list_assign = (
            isinstance(self.stmt.value, vy_ast.Call)
        ) and getattr(self.stmt.value.func, 'id', '') == 'RLPList'

        # Determine if it's an RLPList assignment.
        if is_valid_rlp_list_assign:
            # RLPList assignment implicitly declares a new variable.
            pos = self.context.new_variable(self.stmt.targets[0].id, sub.typ)
            variable_loc = LLLnode.from_list(
                pos,
                typ=sub.typ,
                location='memory',
                pos=getpos(self.stmt),
                annotation=self.stmt.targets[0].id,
            )
            o = make_setter(variable_loc, sub, 'memory', pos=getpos(self.stmt))
        else:
            # Error check when assigning to declared variable
            if isinstance(self.stmt.targets[0], vy_ast.Name):
                # Do not allow assignment to undefined variables without annotation
                if self.stmt.targets[0].id not in self.context.vars:
                    raise VariableDeclarationException("Variable type not defined", self.stmt)
                # Check against implicit conversion
                self._check_implicit_conversion(self.stmt.targets[0].id, sub)

            is_valid_tuple_assign = (
                isinstance(self.stmt.targets[0], vy_ast.Tuple)
            ) and isinstance(self.stmt.value, vy_ast.Tuple)

            # Do no allow tuple-to-tuple assignment
            if is_valid_tuple_assign:
                raise VariableDeclarationException(
                    "Tuple to tuple assignment not supported",
                    self.stmt,
                )

            # Checks to see if assignment is valid
            target = self.get_target(self.stmt.targets[0])
            # Addresses must be explicitly cast to a contract type first.
            if isinstance(target.typ, ContractType) and not isinstance(sub.typ, ContractType):
                raise TypeMismatch(
                    'Contract assignment expects casted address: '
                    f'{target.typ.unit}(<address_var>)',
                    self.stmt
                )
            o = make_setter(target, sub, target.location, pos=getpos(self.stmt))
        o.pos = getpos(self.stmt)
        return o
def parse_func(code, sigs, origcode, global_ctx, _vars=None):
    """Parse a function definition into its complete LLL body.

    Builds the function's signature, local context, argument copiers and
    input clampers, default-argument dispatch chain (if any), and the
    nonreentrancy lock, then wraps the parsed body in the signature-compare
    guard.

    :param code: ast.FunctionDef node of the function
    :param sigs: known function signatures
    :param origcode: original source code (for annotations)
    :param global_ctx: global compilation context
    :param _vars: pre-existing variable records, if any
    :return: LLLnode for the full function, annotated with context/gas info
    """
    if _vars is None:
        _vars = {}
    sig = FunctionSignature.from_definition(
        code,
        sigs=sigs,
        custom_units=global_ctx._custom_units,
        custom_structs=global_ctx._structs,
        constants=global_ctx._constants)

    # Get base args for function: args without defaults.
    total_default_args = len(code.args.defaults)
    base_args = sig.args[:
                         -total_default_args] if total_default_args > 0 else sig.args
    default_args = code.args.args[-total_default_args:]
    default_values = dict(
        zip([arg.arg for arg in default_args], code.args.defaults))

    # __init__ function may not have defaults.
    if sig.name == '__init__' and total_default_args > 0:
        raise FunctionDeclarationException(
            "__init__ function may not have default parameters.")

    # Check for duplicate variables with globals
    for arg in sig.args:
        if arg.name in global_ctx._globals:
            raise FunctionDeclarationException(
                "Variable name duplicated between function arguments and globals: "
                + arg.name)

    # Nonreentrancy lock: assert the storage flag is clear, set it before the
    # body, clear it after.
    nonreentrant_pre = [['pass']]
    nonreentrant_post = [['pass']]
    if sig.nonreentrant_key:
        nkey = global_ctx.get_nonrentrant_counter(sig.nonreentrant_key)
        nonreentrant_pre = [[
            'seq', ['assert', ['iszero', ['sload', nkey]]],
            ['sstore', nkey, 1]
        ]]
        nonreentrant_post = [['sstore', nkey, 0]]

    # Create a local (per function) context.
    context = Context(
        vars=_vars,
        global_ctx=global_ctx,
        sigs=sigs,
        return_type=sig.output_type,
        constancy=Constancy.Constant if sig.const else Constancy.Mutable,
        is_payable=sig.payable,
        origcode=origcode,
        is_private=sig.private,
        method_id=sig.method_id)

    # Copy calldata to memory for fixed-size arguments
    max_copy_size = sum([
        32 if isinstance(arg.typ, ByteArrayLike) else
        get_size_of_type(arg.typ) * 32 for arg in sig.args
    ])
    base_copy_size = sum([
        32 if isinstance(arg.typ, ByteArrayLike) else
        get_size_of_type(arg.typ) * 32 for arg in base_args
    ])
    context.next_mem += max_copy_size

    clampers = []

    # Create callback_ptr, this stores a destination in the bytecode for a private
    # function to jump to after a function has executed.
    _post_callback_ptr = "{}_{}_post_callback_ptr".format(
        sig.name, sig.method_id)
    if sig.private:
        context.callback_ptr = context.new_placeholder(typ=BaseType('uint256'))
        clampers.append(
            LLLnode.from_list(
                ['mstore', context.callback_ptr, 'pass'],
                annotation='pop callback pointer',
            ))
        if total_default_args > 0:
            clampers.append(['label', _post_callback_ptr])

    # private functions without return types need to jump back to
    # the calling function, as there is no return statement to handle the
    # jump.
    stop_func = [['stop']]
    if sig.output_type is None and sig.private:
        stop_func = [['jump', ['mload', context.callback_ptr]]]

    # Choose how base arguments get into memory: nothing, codecopy (for the
    # constructor), or the standard argument copier.
    if not len(base_args):
        copier = 'pass'
    elif sig.name == '__init__':
        copier = [
            'codecopy', MemoryPositions.RESERVED_MEMORY, '~codelen',
            base_copy_size
        ]
    else:
        copier = get_arg_copier(sig=sig,
                                total_size=base_copy_size,
                                memory_dest=MemoryPositions.RESERVED_MEMORY)
    clampers.append(copier)

    # Add asserts for payable and internal
    # private never gets payable check.
    if not sig.payable and not sig.private:
        clampers.append(['assert', ['iszero', 'callvalue']])

    # Fill variable positions
    for i, arg in enumerate(sig.args):
        if i < len(base_args) and not sig.private:
            clampers.append(
                make_clamper(
                    arg.pos,
                    context.next_mem,
                    arg.typ,
                    sig.name == '__init__',
                ))
        if isinstance(arg.typ, ByteArrayLike):
            context.vars[arg.name] = VariableRecord(arg.name,
                                                    context.next_mem, arg.typ,
                                                    False)
            context.next_mem += 32 * get_size_of_type(arg.typ)
        else:
            context.vars[arg.name] = VariableRecord(
                arg.name,
                MemoryPositions.RESERVED_MEMORY + arg.pos,
                arg.typ,
                False,
            )

    # Private function copiers. No clamping for private functions.
    dyn_variable_names = [
        a.name for a in base_args if isinstance(a.typ, ByteArrayLike)
    ]
    if sig.private and dyn_variable_names:
        i_placeholder = context.new_placeholder(typ=BaseType('uint256'))
        unpackers = []
        for idx, var_name in enumerate(dyn_variable_names):
            var = context.vars[var_name]
            ident = "_load_args_%d_dynarg%d" % (sig.method_id, idx)
            o = make_unpacker(ident=ident,
                              i_placeholder=i_placeholder,
                              begin_pos=var.pos)
            unpackers.append(o)

        if not unpackers:
            unpackers = ['pass']

        clampers.append(
            LLLnode.from_list(
                # [0] to complete full overarching 'seq' statement, see private_label.
                ['seq_unchecked'] + unpackers + [0],
                typ=None,
                annotation='dynamic unpacker',
                pos=getpos(code),
            ))

    # Create "clampers" (input well-formedness checkers)
    # Return function body
    if sig.name == '__init__':
        o = LLLnode.from_list(
            ['seq'] + clampers + [parse_body(code.body, context)],
            pos=getpos(code),
        )
    elif is_default_func(sig):
        if len(sig.args) > 0:
            raise FunctionDeclarationException(
                'Default function may not receive any arguments.', code)
        if sig.private:
            raise FunctionDeclarationException(
                'Default function may only be public.',
                code,
            )
        o = LLLnode.from_list(
            ['seq'] + clampers + [parse_body(code.body, context)],
            pos=getpos(code),
        )
    else:
        if total_default_args > 0:  # Function with default parameters.
            function_routine = "{}_{}".format(sig.name, sig.method_id)
            default_sigs = sig_utils.generate_default_arg_sigs(
                code, sigs, global_ctx)
            sig_chain = ['seq']

            # Build a dispatch entry per default-argument signature variant.
            for default_sig in default_sigs:
                sig_compare, private_label = get_sig_statements(
                    default_sig, getpos(code))

                # Populate unset default variables
                populate_arg_count = len(sig.args) - len(default_sig.args)
                set_defaults = []
                if populate_arg_count > 0:
                    current_sig_arg_names = {x.name for x in default_sig.args}
                    missing_arg_names = [
                        arg.arg for arg in default_args
                        if arg.arg not in current_sig_arg_names
                    ]
                    for arg_name in missing_arg_names:
                        value = Expr(default_values[arg_name], context).lll_node
                        var = context.vars[arg_name]
                        left = LLLnode.from_list(var.pos,
                                                 typ=var.typ,
                                                 location='memory',
                                                 pos=getpos(code),
                                                 mutable=var.mutable)
                        set_defaults.append(
                            make_setter(left, value, 'memory',
                                        pos=getpos(code)))

                current_sig_arg_names = {x.name for x in default_sig.args}
                base_arg_names = {arg.name for arg in base_args}
                if sig.private:
                    # Load all variables in default section, if private,
                    # because the stack is a linear pipe.
                    copier_arg_count = len(default_sig.args)
                    copier_arg_names = current_sig_arg_names
                else:
                    copier_arg_count = len(default_sig.args) - len(base_args)
                    copier_arg_names = current_sig_arg_names - base_arg_names

                # Order copier_arg_names, this is very important.
                copier_arg_names = [
                    x.name for x in default_sig.args
                    if x.name in copier_arg_names
                ]

                # Variables to be populated from calldata/stack.
                default_copiers = []
                if copier_arg_count > 0:
                    # Get map of variables in calldata, with their offsets
                    offset = 4
                    calldata_offset_map = {}
                    for arg in default_sig.args:
                        calldata_offset_map[arg.name] = offset
                        offset += (32 if isinstance(arg.typ, ByteArrayLike)
                                   else get_size_of_type(arg.typ) * 32)

                    # Copy set default parameters from calldata
                    dynamics = []
                    for arg_name in copier_arg_names:
                        var = context.vars[arg_name]
                        calldata_offset = calldata_offset_map[arg_name]
                        if sig.private:
                            _offset = calldata_offset
                            if isinstance(var.typ, ByteArrayLike):
                                _size = 32
                                dynamics.append(var.pos)
                            else:
                                _size = var.size * 32
                            default_copiers.append(
                                get_arg_copier(
                                    sig=sig,
                                    memory_dest=var.pos,
                                    total_size=_size,
                                    offset=_offset,
                                ))
                        else:
                            # Add clampers.
                            default_copiers.append(
                                make_clamper(
                                    calldata_offset - 4,
                                    var.pos,
                                    var.typ,
                                ))
                            # Add copying code.
                            if isinstance(var.typ, ByteArrayLike):
                                _offset = [
                                    'add', 4, ['calldataload', calldata_offset]
                                ]
                            else:
                                _offset = calldata_offset
                            default_copiers.append(
                                get_arg_copier(
                                    sig=sig,
                                    memory_dest=var.pos,
                                    total_size=var.size * 32,
                                    offset=_offset,
                                ))

                    # Unpack byte array if necessary.
                    if dynamics:
                        i_placeholder = context.new_placeholder(
                            typ=BaseType('uint256'))
                        for idx, var_pos in enumerate(dynamics):
                            ident = 'unpack_default_sig_dyn_%d_arg%d' % (
                                default_sig.method_id, idx)
                            default_copiers.append(
                                make_unpacker(
                                    ident=ident,
                                    i_placeholder=i_placeholder,
                                    begin_pos=var_pos,
                                ))

                    default_copiers.append(0)  # for over arching seq, POP

                sig_chain.append([
                    'if', sig_compare,
                    [
                        'seq',
                        private_label,
                        ['pass'] if not sig.private else LLLnode.from_list([
                            'mstore',
                            context.callback_ptr,
                            'pass',
                        ],
                                                                           annotation='pop callback pointer',
                                                                           pos=getpos(code)),
                        ['seq'] + set_defaults if set_defaults else ['pass'],
                        ['seq_unchecked'] +
                        default_copiers if default_copiers else ['pass'],
                        [
                            'goto', _post_callback_ptr
                            if sig.private else function_routine
                        ]
                    ]
                ])

            # With private functions all variable loading occurs in the default
            # function sub routine.
            if sig.private:
                _clampers = [['label', _post_callback_ptr]]
            else:
                _clampers = clampers

            # Function with default parameters.
            o = LLLnode.from_list(
                [
                    'seq',
                    sig_chain,
                    [
                        'if',
                        0,  # can only be jumped into
                        [
                            'seq',
                            ['label', function_routine]
                            if not sig.private else ['pass'],
                            ['seq'] + nonreentrant_pre + _clampers +
                            [parse_body(c, context)
                             for c in code.body] + nonreentrant_post +
                            stop_func
                        ],
                    ],
                ],
                typ=None,
                pos=getpos(code))

        else:
            # Function without default parameters.
            sig_compare, private_label = get_sig_statements(sig, getpos(code))
            o = LLLnode.from_list([
                'if', sig_compare,
                ['seq'] + [private_label] + nonreentrant_pre + clampers +
                [parse_body(c, context)
                 for c in code.body] + nonreentrant_post + stop_func
            ],
                                  typ=None,
                                  pos=getpos(code))

    # Check for at least one return statement if necessary.
    if context.return_type and context.function_return_count == 0:
        raise FunctionDeclarationException(
            "Missing return statement in function '%s' " % sig.name, code)

    # Attach context and gas bookkeeping for downstream passes.
    o.context = context
    o.total_gas = o.gas + calc_mem_gas(o.context.next_mem)
    o.func_name = sig.name
    return o
def call(self):
    """Generate LLL for a call statement.

    Dispatches on the call target: a bare-name builtin statement, a
    ``self.`` private function call, a ``log.`` event emission, or an
    external contract call.
    """
    # `self.foo(...)` -- call to another function on this contract.
    is_self_function = (
        isinstance(self.stmt.func, vy_ast.Attribute)
    ) and isinstance(self.stmt.func.value, vy_ast.Name) and self.stmt.func.value.id == "self"
    # `log.EventName(...)` -- event emission.
    is_log_call = (
        isinstance(self.stmt.func, vy_ast.Attribute)
    ) and isinstance(self.stmt.func.value, vy_ast.Name) and self.stmt.func.value.id == 'log'
    if isinstance(self.stmt.func, vy_ast.Name):
        # Bare-name calls are statement-level builtins.
        if self.stmt.func.id in STMT_DISPATCH_TABLE:
            if self.stmt.func.id == 'clear':
                # `clear` is special-cased rather than routed through the table.
                return self._clear()
            else:
                return STMT_DISPATCH_TABLE[self.stmt.func.id](self.stmt, self.context)
        elif self.stmt.func.id in DISPATCH_TABLE:
            # Expression-level builtins may not appear as bare statements.
            raise StructureException(
                f"Function {self.stmt.func.id} can not be called without being used.",
                self.stmt,
            )
        else:
            raise StructureException(
                f"Unknown function: '{self.stmt.func.id}'.",
                self.stmt,
            )
    elif is_self_function:
        return self_call.make_call(self.stmt, self.context)
    elif is_log_call:
        # The event must already be declared in this contract's signatures.
        if self.stmt.func.attr not in self.context.sigs['self']:
            raise EventDeclarationException(f"Event not declared yet: {self.stmt.func.attr}")
        event = self.context.sigs['self'][self.stmt.func.attr]
        # The loop below indexes event.args by position in indexed_list, so
        # its length doubles as the expected argument count.
        if len(event.indexed_list) != len(self.stmt.args):
            raise EventDeclarationException(
                f"{event.name} received {len(self.stmt.args)} arguments but "
                f"expected {len(event.indexed_list)}"
            )
        # Split the arguments into indexed (topics) and non-indexed (data).
        expected_topics, topics = [], []
        expected_data, data = [], []
        for pos, is_indexed in enumerate(event.indexed_list):
            if is_indexed:
                expected_topics.append(event.args[pos])
                topics.append(self.stmt.args[pos])
            else:
                expected_data.append(event.args[pos])
                data.append(self.stmt.args[pos])
        topics = pack_logging_topics(
            event.event_id,
            topics,
            expected_topics,
            self.context,
            pos=getpos(self.stmt),
        )
        inargs, inargsize, inargsize_node, inarg_start = pack_logging_data(
            expected_data,
            data,
            self.context,
            pos=getpos(self.stmt),
        )
        # Data size is either a compile-time constant or, when dynamic,
        # loaded from memory at runtime.
        if inargsize_node is None:
            sz = inargsize
        else:
            sz = ['mload', inargsize_node]
        # Emit a "logN" node where N is the number of topics.
        return LLLnode.from_list([
            'seq',
            inargs,
            LLLnode.from_list(
                ["log" + str(len(topics)), inarg_start, sz] + topics,
                add_gas_estimate=inargsize * 10,
            )
        ], typ=None, pos=getpos(self.stmt))
    else:
        return external_call.make_external_call(self.stmt, self.context)
def call_self_private(stmt_expr, context, sig):
    """Generate LLL for a call to a private function on the same contract.

    Private calls are compiled as an internal jump: the caller's state is
    saved on the EVM stack, arguments are pushed, control jumps to the
    callee's label, and on return the values and saved state are popped
    back off the stack in reverse order.
    """
    # ** Private Call **
    # Steps:
    # (x) push current local variables
    # (x) push arguments
    # (x) push jumpdest (callback ptr)
    # (x) jump to label
    # (x) pop return values
    # (x) pop local variables
    method_name, expr_args, sig = call_lookup_specs(stmt_expr, context)
    pre_init = []
    pop_local_vars = []
    push_local_vars = []
    pop_return_values = []
    push_args = []

    # Push local variables: save the caller's full local-variable memory
    # region (one 32-byte word at a time) onto the stack, and prepare the
    # mirror-image pops (reversed order) to restore it after the call.
    if context.vars:
        var_slots = [(v.pos, v.size) for name, v in context.vars.items()]
        var_slots.sort(key=lambda x: x[0])
        mem_from, mem_to = var_slots[0][
            0], var_slots[-1][0] + var_slots[-1][1] * 32
        push_local_vars = [['mload', pos] for pos in range(mem_from, mem_to, 32)]
        pop_local_vars = [['mstore', pos, 'pass']
                          for pos in reversed(range(mem_from, mem_to, 32))]

    # Push Arguments
    if expr_args:
        inargs, inargsize, arg_pos = pack_arguments(
            sig,
            expr_args,
            context,
            return_placeholder=False,
            pos=getpos(stmt_expr),
        )
        push_args += [
            inargs
        ]  # copy arguments first, to not mess up the push/pop sequencing.

        static_arg_size = 32 * sum(
            [get_static_size_of_type(arg.typ) for arg in expr_args])
        static_pos = arg_pos + static_arg_size
        total_arg_size = ceil32(inargsize - 4)

        if static_arg_size != total_arg_size:
            # requires dynamic section: emit a runtime loop that walks the
            # dynamic data backwards and pushes each non-zero word.
            ident = 'push_args_%d_%d_%d' % (sig.method_id, stmt_expr.lineno,
                                            stmt_expr.col_offset)
            start_label = ident + '_start'
            end_label = ident + '_end'
            i_placeholder = context.new_placeholder(BaseType('uint256'))
            push_args += [
                ['mstore', i_placeholder, arg_pos + total_arg_size],
                ['label', start_label],
                [
                    'if', ['lt', ['mload', i_placeholder], static_pos],
                    ['goto', end_label]
                ],
                [
                    'if_unchecked',
                    ['ne', ['mload', ['mload', i_placeholder]], 0],
                    ['mload', ['mload', i_placeholder]],
                ],
                [
                    'mstore', i_placeholder,
                    ['sub', ['mload', i_placeholder], 32]
                ],  # decrease i
                ['goto', start_label],
                ['label', end_label]
            ]

        # push static section (reversed so pops come out in order)
        push_args += [['mload', pos]
                      for pos in reversed(range(arg_pos, static_pos, 32))]

    # Jump to function label.
    jump_to_func = [
        ['add', ['pc'], 6],  # set callback pointer.
        ['goto', 'priv_{}'.format(sig.method_id)],
        ['jumpdest'],
    ]

    # Pop return values.
    returner = [0]
    if sig.output_type:
        output_placeholder, returner, output_size = call_make_placeholder(
            stmt_expr, context, sig)
        if output_size > 0:
            dynamic_offsets = []
            if isinstance(sig.output_type, (BaseType, ListType)):
                # Fixed-size output: pop each word straight into memory.
                pop_return_values = [[
                    'mstore', ['add', output_placeholder, pos], 'pass'
                ] for pos in range(0, output_size, 32)]
            elif isinstance(sig.output_type, ByteArrayLike):
                # Dynamic output: unpacked separately below.
                dynamic_offsets = [(0, sig.output_type)]
                pop_return_values = [
                    ['pop', 'pass'],
                ]
            elif isinstance(sig.output_type, TupleLike):
                # Tuples: pop the static head word-by-word, and remember
                # where each dynamic member's data must be unpacked.
                static_offset = 0
                pop_return_values = []
                for out_type in sig.output_type.members:
                    if isinstance(out_type, ByteArrayLike):
                        pop_return_values.append([
                            'mstore',
                            ['add', output_placeholder, static_offset], 'pass'
                        ])
                        dynamic_offsets.append(([
                            'mload',
                            ['add', output_placeholder, static_offset]
                        ], out_type))
                    else:
                        pop_return_values.append([
                            'mstore',
                            ['add', output_placeholder, static_offset], 'pass'
                        ])
                    static_offset += 32

            # append dynamic unpacker: for each dynamic member, loop over its
            # (ceil32-rounded) length popping one word per iteration.
            dyn_idx = 0
            for in_memory_offset, _out_type in dynamic_offsets:
                ident = "%d_%d_arg_%d" % (stmt_expr.lineno,
                                          stmt_expr.col_offset, dyn_idx)
                dyn_idx += 1
                start_label = 'dyn_unpack_start_' + ident
                end_label = 'dyn_unpack_end_' + ident
                i_placeholder = context.new_placeholder(
                    typ=BaseType('uint256'))
                begin_pos = ['add', output_placeholder, in_memory_offset]
                # loop until length.
                o = LLLnode.from_list(
                    [
                        'seq_unchecked',
                        ['mstore', begin_pos, 'pass'],  # get len
                        ['mstore', i_placeholder, 0],
                        ['label', start_label],
                        [  # break
                            'if',
                            [
                                'ge', ['mload', i_placeholder],
                                ['ceil32', ['mload', begin_pos]]
                            ],
                            ['goto', end_label]
                        ],
                        [  # pop into correct memory slot.
                            'mstore',
                            [
                                'add', ['add', begin_pos, 32],
                                ['mload', i_placeholder]
                            ],
                            'pass',
                        ],
                        # increment i
                        [
                            'mstore', i_placeholder,
                            ['add', 32, ['mload', i_placeholder]]
                        ],
                        ['goto', start_label],
                        ['label', end_label]
                    ],
                    typ=None,
                    annotation='dynamic unpacker',
                    pos=getpos(stmt_expr))
                pop_return_values.append(o)

    # Assemble the full call: the sub-sequences must stay in exactly this
    # order to keep stack pushes and pops balanced.
    call_body = list(
        itertools.chain(
            ['seq_unchecked'],
            pre_init,
            push_local_vars,
            push_args,
            jump_to_func,
            pop_return_values,
            pop_local_vars,
            [returner],
        ))
    # If we have no return, we need to pop off
    pop_returner_call_body = ['pop', call_body
                              ] if sig.output_type is None else call_body

    o = LLLnode.from_list(pop_returner_call_body,
                          typ=sig.output_type,
                          location='memory',
                          pos=getpos(stmt_expr),
                          annotation='Internal Call: %s' % method_name,
                          add_gas_estimate=sig.gas)
    o.gas += sig.gas
    return o
def _assert_unreachable(test_expr, msg): return LLLnode.from_list(['assert_unreachable', test_expr], typ=None, pos=getpos(msg))
def to_decimal(expr, args, kwargs, context): in_arg = args[0] input_type, _ = get_type(in_arg) if input_type == "Bytes": if in_arg.typ.maxlen > 32: raise TypeMismatch( f"Cannot convert bytes array of max length {in_arg.typ.maxlen} to decimal", expr, ) num = byte_array_to_num(in_arg, expr, "int128") return LLLnode.from_list(["mul", num, DECIMAL_DIVISOR], typ=BaseType("decimal"), pos=getpos(expr)) else: if input_type == "uint256": if in_arg.typ.is_literal: if not SizeLimits.in_bounds("int128", (in_arg.value * DECIMAL_DIVISOR)): raise InvalidLiteral( f"Number out of range: {in_arg.value}", expr, ) else: return LLLnode.from_list(["mul", in_arg, DECIMAL_DIVISOR], typ=BaseType("decimal"), pos=getpos(expr)) else: return LLLnode.from_list( [ "uclample", ["mul", in_arg, DECIMAL_DIVISOR], ["mload", MemoryPositions.MAXDECIMAL], ], typ=BaseType("decimal"), pos=getpos(expr), ) elif input_type == "address": return LLLnode.from_list( [ "mul", [ "signextend", 15, ["and", in_arg, (SizeLimits.ADDRSIZE - 1)] ], DECIMAL_DIVISOR, ], typ=BaseType("decimal"), pos=getpos(expr), ) elif input_type == "bytes32": if in_arg.typ.is_literal: if not SizeLimits.in_bounds("int128", (in_arg.value * DECIMAL_DIVISOR)): raise InvalidLiteral( f"Number out of range: {in_arg.value}", expr, ) else: return LLLnode.from_list(["mul", in_arg, DECIMAL_DIVISOR], typ=BaseType("decimal"), pos=getpos(expr)) else: return LLLnode.from_list( [ "clamp", ["mload", MemoryPositions.MINDECIMAL], ["mul", in_arg, DECIMAL_DIVISOR], ["mload", MemoryPositions.MAXDECIMAL], ], typ=BaseType("decimal"), pos=getpos(expr), ) elif input_type in ("int128", "bool"): return LLLnode.from_list(["mul", in_arg, DECIMAL_DIVISOR], typ=BaseType("decimal"), pos=getpos(expr)) else: raise InvalidLiteral(f"Invalid input for decimal: {in_arg}", expr)
def parse_for(self):
    """Generate LLL for a ``for`` loop.

    Handles list iteration (delegated to ``parse_for_list``) and three
    ``range()`` forms: ``range(n)``, ``range(const, const)`` and
    ``range(x, x + const)``.
    """
    # Type 0 for, e.g. for i in list(): ...
    if self._is_list_iter():
        return self.parse_for_list()

    if not isinstance(self.stmt.iter, vy_ast.Call):
        if isinstance(self.stmt.iter, vy_ast.Subscript):
            raise StructureException("Cannot iterate over a nested list",
                                     self.stmt.iter)
        raise StructureException(
            f"Cannot iterate over '{type(self.stmt.iter).__name__}' object",
            self.stmt.iter
        )
    if getattr(self.stmt.iter.func, 'id', None) != "range":
        raise StructureException(
            "Non-literals cannot be used as loop range", self.stmt.iter.func
        )
    if len(self.stmt.iter.args) not in {1, 2}:
        raise StructureException(
            f"Range expects between 1 and 2 arguments, got {len(self.stmt.iter.args)}",
            self.stmt.iter.func
        )

    block_scope_id = id(self.stmt)
    with self.context.make_blockscope(block_scope_id):
        # Get arg0
        arg0 = self.stmt.iter.args[0]
        num_of_args = len(self.stmt.iter.args)

        # Type 1 for, e.g. for i in range(10): ...
        if num_of_args == 1:
            arg0_val = self._get_range_const_value(arg0)
            start = LLLnode.from_list(0, typ='int128', pos=getpos(self.stmt))
            rounds = arg0_val

        # Type 2 for, e.g. for i in range(100, 110): ...
        elif self._check_valid_range_constant(self.stmt.iter.args[1],
                                              raise_exception=False)[0]:
            arg0_val = self._get_range_const_value(arg0)
            arg1_val = self._get_range_const_value(self.stmt.iter.args[1])
            start = LLLnode.from_list(arg0_val, typ='int128',
                                      pos=getpos(self.stmt))
            rounds = LLLnode.from_list(arg1_val - arg0_val, typ='int128',
                                       pos=getpos(self.stmt))

        # Type 3 for, e.g. for i in range(x, x + 10): ...
        else:
            arg1 = self.stmt.iter.args[1]
            if not isinstance(arg1, vy_ast.BinOp) or not isinstance(arg1.op, vy_ast.Add):
                raise StructureException(
                    (
                        "Two-arg for statements must be of the form `for i "
                        "in range(start, start + rounds): ...`"
                    ),
                    arg1,
                )
            if arg0 != arg1.left:
                raise StructureException(
                    (
                        "Two-arg for statements of the form `for i in "
                        "range(x, x + y): ...` must have x identical in both "
                        f"places: {vy_ast.ast_to_dict(arg0)} {vy_ast.ast_to_dict(arg1.left)}"
                    ),
                    self.stmt.iter,
                )
            rounds = self._get_range_const_value(arg1.right)
            start = Expr.parse_value_expr(arg0, self.context)

        # `rounds` is a plain int (Type 1/3) or an LLLnode literal (Type 2).
        r = rounds if isinstance(rounds, int) else rounds.value
        if r < 1:
            raise StructureException(
                f"For loop has invalid number of iterations ({r}),"
                " the value must be greater than zero",
                self.stmt.iter
            )

        varname = self.stmt.target.id
        pos = self.context.new_variable(varname, BaseType('int128'),
                                        pos=getpos(self.stmt))
        self.context.forvars[varname] = True
        o = LLLnode.from_list(
            ['repeat', pos, start, rounds,
             parse_body(self.stmt.body, self.context)],
            typ=None,
            pos=getpos(self.stmt),
        )
        # Loop variable only lives for the duration of the loop.
        del self.context.vars[varname]
        del self.context.forvars[varname]
        return o
def parse_sequence(base_node, elements, context): """ Generate an LLL node from a sequence of Vyper AST nodes, such as values inside a list/tuple or arguments inside a call. Arguments --------- base_node : VyperNode Parent node which contains the sequence being parsed. elements : List[VyperNode] A list of nodes within the sequence. context : Context Currently active local context. Returns ------- List[LLLNode] LLL nodes that must execute prior to generating the actual sequence in order to avoid memory corruption issues. This list may be empty, depending on the values within `elements`. List[LLLNode] LLL nodes which collectively represent `elements`. """ init_lll = [] sequence_lll = [] for node in elements: if isinstance(node, vy_ast.List): # for nested lists, ensure the init LLL is also processed before the values init, seq = parse_sequence(node, node.elements, context) init_lll.extend(init) out_type = next((i.typ for i in seq if not i.typ.is_literal), seq[0].typ) typ = ListType(out_type, len(node.elements), is_literal=True) multi_lll = LLLnode.from_list(["multi"] + seq, typ=typ, pos=getpos(node)) sequence_lll.append(multi_lll) continue lll_node = Expr(node, context).lll_node if isinstance( node, vy_ast.Call) or (isinstance(node, vy_ast.Subscript) and isinstance(node.value, vy_ast.Call)): # nodes which potentially create their own internal memory variables, and so must # be parsed prior to generating the final sequence to avoid memory corruption target = LLLnode.from_list( context.new_internal_variable(lll_node.typ), typ=lll_node.typ, location="memory", pos=getpos(base_node), ) init_lll.append( make_setter(target, lll_node, "memory", pos=getpos(base_node))) sequence_lll.append( LLLnode.from_list(target, typ=lll_node.typ, pos=getpos(base_node), location="memory"), ) else: sequence_lll.append(lll_node) return init_lll, sequence_lll
def parse_for_list(self):
    """Generate LLL for a ``for`` loop over a list.

    Supports three sources: a list already held in a local variable
    (memory/calldata), a list literal written inline in the ``for``
    statement, and a list stored in contract storage.
    """
    with self.context.range_scope():
        iter_list_node = Expr(self.stmt.iter, self.context).lll_node
    if not isinstance(iter_list_node.typ.subtype, BaseType):  # Sanity check on list subtype.
        raise StructureException('For loops allowed only on basetype lists.',
                                 self.stmt.iter)
    # Non-None only when iterating over a named local variable.
    iter_var_type = (
        self.context.vars.get(self.stmt.iter.id).typ
        if isinstance(self.stmt.iter, vy_ast.Name)
        else None
    )
    subtype = iter_list_node.typ.subtype.typ
    varname = self.stmt.target.id
    # Memory slot holding the current element's value.
    value_pos = self.context.new_variable(
        varname,
        BaseType(subtype, unit=iter_list_node.typ.subtype.unit),
    )
    # Hidden loop counter, mangled so user code cannot shadow it.
    i_pos_raw_name = '_index_for_' + varname
    i_pos = self.context.new_internal_variable(
        i_pos_raw_name,
        BaseType(subtype),
    )
    self.context.forvars[varname] = True

    # Is a list that is already allocated to memory.
    if iter_var_type:
        list_name = self.stmt.iter.id
        # make sure list cannot be altered whilst iterating.
        with self.context.in_for_loop_scope(list_name):
            iter_var = self.context.vars.get(self.stmt.iter.id)
            if iter_var.location == 'calldata':
                fetcher = 'calldataload'
            elif iter_var.location == 'memory':
                fetcher = 'mload'
            else:
                raise CompilerPanic(
                    f'List iteration only supported on in-memory types {self.expr}',
                )
            # Each iteration: copy element i into value_pos, then run the body.
            body = [
                'seq',
                [
                    'mstore',
                    value_pos,
                    [fetcher, ['add', iter_var.pos, ['mul', ['mload', i_pos], 32]]],
                ],
                parse_body(self.stmt.body, self.context)
            ]
            o = LLLnode.from_list(
                ['repeat', i_pos, 0, iter_var.size, body],
                typ=None, pos=getpos(self.stmt)
            )

    # List gets defined in the for statement.
    elif isinstance(self.stmt.iter, vy_ast.List):
        # Allocate list to memory.
        count = iter_list_node.typ.count
        tmp_list = LLLnode.from_list(
            obj=self.context.new_placeholder(ListType(iter_list_node.typ.subtype, count)),
            typ=ListType(iter_list_node.typ.subtype, count),
            location='memory'
        )
        # Copy the literal into the placeholder before looping over it.
        setter = make_setter(tmp_list, iter_list_node, 'memory', pos=getpos(self.stmt))
        body = [
            'seq',
            ['mstore', value_pos, ['mload', ['add', tmp_list, ['mul', ['mload', i_pos], 32]]]],
            parse_body(self.stmt.body, self.context)
        ]
        o = LLLnode.from_list(
            ['seq', setter, ['repeat', i_pos, 0, count, body]],
            typ=None, pos=getpos(self.stmt)
        )

    # List contained in storage.
    elif isinstance(self.stmt.iter, vy_ast.Attribute):
        count = iter_list_node.typ.count
        list_name = iter_list_node.annotation
        # make sure list cannot be altered whilst iterating.
        with self.context.in_for_loop_scope(list_name):
            # Element i lives at hashed storage slot + i.
            body = [
                'seq',
                [
                    'mstore',
                    value_pos,
                    ['sload', ['add', ['sha3_32', iter_list_node], ['mload', i_pos]]]
                ],
                parse_body(self.stmt.body, self.context),
            ]
            o = LLLnode.from_list(
                ['seq', ['repeat', i_pos, 0, count, body]],
                typ=None, pos=getpos(self.stmt)
            )

    # NOTE(review): if the iterable matches none of the three branches above,
    # `o` is never bound and the `return o` below raises NameError -- the
    # earlier checks presumably make that unreachable; confirm.
    del self.context.vars[varname]
    # this kind of open access to the vars dict should be disallowed.
    # we should use member functions to provide an API for these kinds
    # of operations.
    del self.context.vars[self.context._mangle(i_pos_raw_name)]
    del self.context.forvars[varname]
    return o
def parse_BinOp(self):
    """Generate LLL for a binary arithmetic operation.

    Emits checked ("safe math") LLL for +, -, *, /, %, ** over numeric
    types, then wraps the result in the appropriate range clamp for the
    output type. Returns None for unsupported type combinations, which
    presumably lets a caller report the error -- TODO confirm.
    """
    left = Expr.parse_value_expr(self.expr.left, self.context)
    right = Expr.parse_value_expr(self.expr.right, self.context)

    if not is_numeric_type(left.typ) or not is_numeric_type(right.typ):
        return

    arithmetic_pair = {left.typ.typ, right.typ.typ}
    pos = getpos(self.expr)

    # Special case with uint256 were int literal may be casted.
    if arithmetic_pair == {"uint256", "int128"}:
        # Check right side literal.
        if right.typ.is_literal and SizeLimits.in_bounds(
                "uint256", right.value):
            right = LLLnode.from_list(
                right.value,
                typ=BaseType("uint256", None, is_literal=True),
                pos=pos,
            )
        # Check left side literal.
        elif left.typ.is_literal and SizeLimits.in_bounds(
                "uint256", left.value):
            left = LLLnode.from_list(
                left.value,
                typ=BaseType("uint256", None, is_literal=True),
                pos=pos,
            )

    if left.typ.typ == "decimal" and isinstance(self.expr.op, vy_ast.Pow):
        return

    # Only allow explicit conversions to occur.
    if left.typ.typ != right.typ.typ:
        return

    ltyp, rtyp = left.typ.typ, right.typ.typ

    # `arith` is built in terms of the LLL placeholders "l" and "r", which
    # are bound to the evaluated operands by the `with` wrapper at the end.
    arith = None
    if isinstance(self.expr.op, (vy_ast.Add, vy_ast.Sub)):
        new_typ = BaseType(ltyp)
        op = "add" if isinstance(self.expr.op, vy_ast.Add) else "sub"
        if ltyp == "uint256" and isinstance(self.expr.op, vy_ast.Add):
            # safeadd: overflow iff l + r < l
            arith = [
                "seq", ["assert", ["ge", ["add", "l", "r"], "l"]],
                ["add", "l", "r"]
            ]
        elif ltyp == "uint256" and isinstance(self.expr.op, vy_ast.Sub):
            # safesub: underflow iff l < r
            arith = [
                "seq", ["assert", ["ge", "l", "r"]], ["sub", "l", "r"]
            ]
        elif ltyp == rtyp:
            arith = [op, "l", "r"]

    elif isinstance(self.expr.op, vy_ast.Mult):
        new_typ = BaseType(ltyp)
        if ltyp == rtyp == "uint256":
            # Overflow check: (l*r)/l == r unless l == 0.
            arith = [
                "with", "ans", ["mul", "l", "r"],
                [
                    "seq",
                    [
                        "assert",
                        [
                            "or", ["eq", ["div", "ans", "l"], "r"],
                            ["iszero", "l"]
                        ]
                    ],
                    "ans",
                ],
            ]
        elif ltyp == rtyp == "int128":
            # TODO should this be 'smul' (note edge cases in YP for smul)
            arith = ["mul", "l", "r"]
        elif ltyp == rtyp == "decimal":
            # TODO should this be smul
            # Same overflow check, then rescale by DECIMAL_DIVISOR.
            arith = [
                "with", "ans", ["mul", "l", "r"],
                [
                    "seq",
                    [
                        "assert",
                        [
                            "or", ["eq", ["sdiv", "ans", "l"], "r"],
                            ["iszero", "l"]
                        ]
                    ],
                    ["sdiv", "ans", DECIMAL_DIVISOR],
                ],
            ]

    elif isinstance(self.expr.op, vy_ast.Div):
        if right.typ.is_literal and right.value == 0:
            return

        new_typ = BaseType(ltyp)

        if right.typ.is_literal:
            divisor = "r"
        else:
            # only apply the non-zero clamp when r is not a constant
            divisor = ["clamp_nonzero", "r"]

        if ltyp == rtyp == "uint256":
            arith = ["div", "l", divisor]
        elif ltyp == rtyp == "int128":
            arith = ["sdiv", "l", divisor]
        elif ltyp == rtyp == "decimal":
            # Pre-scale the numerator so fixed-point division keeps precision.
            arith = [
                "sdiv",
                # TODO check overflow cases, also should it be smul
                ["mul", "l", DECIMAL_DIVISOR],
                divisor,
            ]

    elif isinstance(self.expr.op, vy_ast.Mod):
        if right.typ.is_literal and right.value == 0:
            return

        new_typ = BaseType(ltyp)

        if right.typ.is_literal:
            divisor = "r"
        else:
            # only apply the non-zero clamp when r is not a constant
            divisor = ["clamp_nonzero", "r"]

        if ltyp == rtyp == "uint256":
            arith = ["mod", "l", divisor]
        elif ltyp == rtyp:
            # TODO should this be regular mod
            arith = ["smod", "l", divisor]

    elif isinstance(self.expr.op, vy_ast.Pow):
        if ltyp != "int128" and ltyp != "uint256" and isinstance(
                self.expr.right, vy_ast.Name):
            return

        new_typ = BaseType(ltyp)

        # Trivial bases short-circuit: 1**x == 1, 0**x == (x == 0).
        if self.expr.left.get("value") == 1:
            return LLLnode.from_list([1], typ=new_typ, pos=pos)
        if self.expr.left.get("value") == 0:
            return LLLnode.from_list(["iszero", right], typ=new_typ, pos=pos)

        if ltyp == "int128":
            is_signed = True
            num_bits = 128
        else:
            is_signed = False
            num_bits = 256

        if isinstance(self.expr.left, vy_ast.Int):
            # Known base: bound the exponent so base**exp cannot overflow.
            value = self.expr.left.value
            upper_bound = calculate_largest_power(value, num_bits,
                                                  is_signed) + 1
            # for signed integers, this also prevents negative values
            clamp = ["lt", right, upper_bound]
            return LLLnode.from_list(
                ["seq", ["assert", clamp], ["exp", left, right]],
                typ=new_typ,
                pos=pos,
            )
        elif isinstance(self.expr.right, vy_ast.Int):
            # Known exponent: bound the base instead.
            value = self.expr.right.value
            upper_bound = calculate_largest_base(value, num_bits,
                                                 is_signed) + 1
            if is_signed:
                clamp = [
                    "and", ["slt", left, upper_bound],
                    ["sgt", left, -upper_bound]
                ]
            else:
                clamp = ["lt", left, upper_bound]
            return LLLnode.from_list(
                ["seq", ["assert", clamp], ["exp", left, right]],
                typ=new_typ,
                pos=pos,
            )
        else:
            # `a ** b` where neither `a` or `b` are known
            # TODO this is currently unreachable, once we implement a way to do it safely
            # remove the check in `vyper/context/types/value/numeric.py`
            return

    if arith is None:
        return

    # Wrap the arithmetic in the output type's range clamp.
    p = ["seq"]
    if new_typ.typ == "int128":
        p.append(int128_clamp(arith))
    elif new_typ.typ == "decimal":
        p.append([
            "clamp",
            ["mload", MemoryPositions.MINDECIMAL],
            arith,
            ["mload", MemoryPositions.MAXDECIMAL],
        ])
    elif new_typ.typ == "uint256":
        # uint256 ops carry their own checks above; no extra clamp.
        p.append(arith)
    else:
        return

    # Evaluate each operand exactly once by binding them to "l" and "r".
    p = ["with", "l", left, ["with", "r", right, p]]
    return LLLnode.from_list(p, typ=new_typ, pos=pos)
def aug_assign(self): target = self.get_target(self.stmt.target) sub = Expr.parse_value_expr(self.stmt.value, self.context) if not isinstance(self.stmt.op, (ast.Add, ast.Sub, ast.Mult, ast.Div, ast.Mod)): raise StructureException("Unsupported operator for augassign", self.stmt) if not isinstance(target.typ, BaseType): raise TypeMismatchException( "Can only use aug-assign operators with simple types!", self.stmt.target) if target.location == 'storage': o = Expr.parse_value_expr( ast.BinOp( left=LLLnode.from_list(['sload', '_stloc'], typ=target.typ, pos=target.pos), right=sub, op=self.stmt.op, lineno=self.stmt.lineno, col_offset=self.stmt.col_offset, ), self.context, ) return LLLnode.from_list([ 'with', '_stloc', target, [ 'sstore', '_stloc', base_type_conversion( o, o.typ, target.typ, pos=getpos(self.stmt)), ], ], typ=None, pos=getpos(self.stmt)) elif target.location == 'memory': o = Expr.parse_value_expr( ast.BinOp( left=LLLnode.from_list(['mload', '_mloc'], typ=target.typ, pos=target.pos), right=sub, op=self.stmt.op, lineno=self.stmt.lineno, col_offset=self.stmt.col_offset, ), self.context, ) return LLLnode.from_list([ 'with', '_mloc', target, [ 'mstore', '_mloc', base_type_conversion( o, o.typ, target.typ, pos=getpos(self.stmt)), ], ], typ=None, pos=getpos(self.stmt))