def safe_add(x, y):
    assert x.typ is not None and x.typ == y.typ and isinstance(x.typ, BaseType)
    num_info = x.typ._num_info

    res = IRnode.from_list(["add", x, y], typ=x.typ.typ)

    if num_info.bits < 256:
        return clamp_basetype(res)

    # bits == 256
    with res.cache_when_complex("ans") as (b1, res):
        if num_info.is_signed:
            # if r < 0:
            #   ans < l
            # else:
            #   ans >= l  # aka (iszero (ans < l))
            # aka: (r < 0) == (ans < l)
            ok = ["eq", ["slt", y, 0], ["slt", res, x]]
        else:
            # note this is "equivalent" to the unsigned form
            # of the above (because y < 0 == False)
            #   ["eq", ["lt", y, 0], ["lt", res, x]]
            # TODO push down into optimizer rules.
            ok = ["ge", res, x]

        ret = IRnode.from_list(["seq", ["assert", ok], res])
        return b1.resolve(ret)

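# Illustrative sketch (not compiler code): a pure-Python model of the 256-bit
# overflow checks emitted by safe_add above, with `% 2**256` standing in for
# EVM wraparound. `_wrap256`, `_signed`, and `_model_safe_add_ok` are
# hypothetical helpers introduced only for this example.
def _wrap256(v):
    return v % 2**256

def _signed(v):
    return v - 2**256 if v >= 2**255 else v

def _model_safe_add_ok(x, y, signed):
    res = _wrap256(x + y)
    if signed:
        # mirrors ["eq", ["slt", y, 0], ["slt", res, x]]
        return (_signed(y) < 0) == (_signed(res) < _signed(x))
    # mirrors ["ge", res, x]
    return res >= x

assert _model_safe_add_ok(1, 2, signed=False)
assert not _model_safe_add_ok(2**256 - 1, 1, signed=False)  # wraps to 0; caught
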
def pop_dyn_array(darray_node, return_popped_item):
    assert isinstance(darray_node.typ, DArrayType)
    ret = ["seq"]
    with darray_node.cache_when_complex("darray") as (b1, darray_node):
        old_len = ["clamp_nonzero", get_dyn_array_count(darray_node)]
        new_len = IRnode.from_list(["sub", old_len, 1], typ="uint256")

        with new_len.cache_when_complex("new_len") as (b2, new_len):
            ret.append(STORE(darray_node, new_len))

            # NOTE skip array bounds check bc we already asserted len two lines up
            if return_popped_item:
                popped_item = get_element_ptr(darray_node, new_len, array_bounds_check=False)
                ret.append(popped_item)
                typ = popped_item.typ
                location = popped_item.location
                encoding = popped_item.encoding
            else:
                typ, location, encoding = None, None, None

            return IRnode.from_list(
                b1.resolve(b2.resolve(ret)), typ=typ, location=location, encoding=encoding
            )

def parse_Hex(self):
    hexstr = self.expr.value

    t = self.expr._metadata.get("type")

    n_bytes = (len(hexstr) - 2) // 2  # e.g. "0x1234" is 2 bytes

    if t is not None:
        inferred_type = new_type_to_old_type(self.expr._metadata["type"])
    # This branch is a band-aid to deal with bytes20 vs address literals
    # TODO handle this properly in the type checker
    elif len(hexstr) == 42:
        inferred_type = BaseType("address", is_literal=True)
    else:
        inferred_type = BaseType(f"bytes{n_bytes}", is_literal=True)

    if is_base_type(inferred_type, "address"):
        # sanity check typechecker did its job
        assert len(hexstr) == 42 and is_checksum_encoded(hexstr)
        typ = BaseType("address")
        return IRnode.from_list(int(self.expr.value, 16), typ=typ)

    elif is_bytes_m_type(inferred_type):
        assert n_bytes == inferred_type._bytes_info.m

        # bytes_m types are left padded with zeros
        val = int(hexstr, 16) << 8 * (32 - n_bytes)
        typ = BaseType(f"bytes{n_bytes}", is_literal=True)
        return IRnode.from_list(val, typ=typ)

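# Worked example of the bytes_m left-padding above (illustrative only):
# "0x1234" has n_bytes == 2, and the value is shifted into the high-order
# bytes of the 32-byte word.
assert (len("0x1234") - 2) // 2 == 2
assert hex(int("0x1234", 16) << 8 * (32 - 2)) == "0x1234" + "00" * 30
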
def parse_UnaryOp(self):
    operand = Expr.parse_value_expr(self.expr.operand, self.context)
    if isinstance(self.expr.op, vy_ast.Not):
        if isinstance(operand.typ, BaseType) and operand.typ.typ == "bool":
            return IRnode.from_list(["iszero", operand], typ="bool")

    if isinstance(self.expr.op, vy_ast.Invert):
        if isinstance(operand.typ, EnumType):
            n_members = len(operand.typ.members)
            # use (xor 0b11..1 operand) to flip all the bits in
            # `operand`. `mask` could be a very large constant and
            # hurt codesize, but most user enums will likely have few
            # enough members that the mask will not be large.
            mask = (2**n_members) - 1
            return IRnode.from_list(["xor", mask, operand], typ=operand.typ)

        if is_base_type(operand.typ, "uint256"):
            return IRnode.from_list(["not", operand], typ=operand.typ)

        # block `~` for all other integer types, since reasoning
        # about dirty bits is not entirely trivial. maybe revisit
        # this at a later date.
        raise UnimplementedException(f"~ is not supported for {operand.typ}", self.expr)

    if isinstance(self.expr.op, vy_ast.USub) and is_numeric_type(operand.typ):
        assert operand.typ._num_info.is_signed
        # Clamp on minimum signed integer value as we cannot negate that
        # value (all other integer values are fine)
        min_int_val, _ = operand.typ._num_info.bounds
        return IRnode.from_list(["sub", 0, clamp("sgt", operand, min_int_val)], typ=operand.typ)

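# Illustrative sketch of the enum `~` lowering above: flipping the low
# n_members bits with an xor mask, modeled in pure Python. The 4-member
# enum below is hypothetical.
n_members = 4
mask = (2**n_members) - 1  # 0b1111
assert 0b0101 ^ mask == 0b1010  # every member bit flips; nothing above the mask
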
def keccak256_helper(expr, to_hash, context):
    _check_byteslike(to_hash.typ, expr)

    # Can hash literals
    # TODO this is dead code.
    if isinstance(to_hash, bytes):
        return IRnode.from_list(bytes_to_int(keccak256(to_hash)), typ=BaseType("bytes32"))

    # Can hash bytes32 objects
    if is_base_type(to_hash.typ, "bytes32"):
        return IRnode.from_list(
            [
                "seq",
                ["mstore", MemoryPositions.FREE_VAR_SPACE, to_hash],
                ["sha3", MemoryPositions.FREE_VAR_SPACE, 32],
            ],
            typ=BaseType("bytes32"),
            add_gas_estimate=_gas_bound(1),
        )

    to_hash = ensure_in_memory(to_hash, context)

    with to_hash.cache_when_complex("buf") as (b1, to_hash):
        data = bytes_data_ptr(to_hash)
        len_ = get_bytearray_length(to_hash)
        return b1.resolve(
            IRnode.from_list(
                ["sha3", data, len_],
                typ="bytes32",
                annotation="keccak256",
                add_gas_estimate=_gas_bound(ceil(to_hash.typ.maxlen / 32)),
            )
        )

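# Hedged note on the gas estimates above: the EVM's keccak256 opcode costs a
# base fee plus a per-word fee (30 + 6 per 32-byte word under current fee
# rules), which is presumably what `_gas_bound(num_words)` approximates.
# For example, hashing a buffer of up to 64 bytes covers two words:
from math import ceil
assert ceil(64 / 32) == 2
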
def _complex_make_setter(left, right):
    if right.value == "~empty" and left.location == MEMORY:
        # optimized memzero
        return mzero(left, left.typ.memory_bytes_required)

    ret = ["seq"]

    if isinstance(left.typ, SArrayType):
        n_items = right.typ.count
        keys = [IRnode.from_list(i, typ="uint256") for i in range(n_items)]

    if isinstance(left.typ, TupleLike):
        keys = left.typ.tuple_keys()

    # if len(keys) == 0:
    #    return IRnode.from_list(["pass"])

    # general case
    # TODO use copy_bytes when the generated code is above a certain size
    with left.cache_when_complex("_L") as (b1, left), right.cache_when_complex("_R") as (b2, right):
        for k in keys:
            l_i = get_element_ptr(left, k, array_bounds_check=False)
            r_i = get_element_ptr(right, k, array_bounds_check=False)
            ret.append(make_setter(l_i, r_i))

        return b1.resolve(b2.resolve(IRnode.from_list(ret)))

def clamp_nonzero(arg):
    # TODO: use clamp("ne", arg, 0) once optimizer rules can handle it
    with IRnode.from_list(arg).cache_when_complex("should_nonzero") as (b1, arg):
        check = IRnode.from_list(["assert", arg], error_msg="clamp_nonzero")
        ret = ["seq", check, arg]
        return IRnode.from_list(b1.resolve(ret), typ=arg.typ)

def safe_sub(x, y):
    num_info = x.typ._num_info

    res = IRnode.from_list(["sub", x, y], typ=x.typ.typ)

    if num_info.bits < 256:
        return clamp_basetype(res)

    # bits == 256
    with res.cache_when_complex("ans") as (b1, res):
        if num_info.is_signed:
            # if r < 0:
            #   ans > l
            # else:
            #   ans <= l  # aka (iszero (ans > l))
            # aka: (r < 0) == (ans > l)
            ok = ["eq", ["slt", y, 0], ["sgt", res, x]]
        else:
            # note this is "equivalent" to the unsigned form
            # of the above (because y < 0 == False)
            #   ["eq", ["lt", y, 0], ["gt", res, x]]
            # TODO push down into optimizer rules.
            ok = ["le", res, x]

        check = IRnode.from_list(["assert", ok], error_msg="safesub")
        ret = IRnode.from_list(["seq", check, res])
        return b1.resolve(ret)

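# Companion sketch to the safe_add model above: for subtraction the signed
# identity flips direction, mirroring ["eq", ["slt", y, 0], ["sgt", res, x]].
# Again a self-contained pure-Python illustration, not compiler code.
def _model_safe_sub_ok(x, y, signed):
    wrap256 = lambda v: v % 2**256
    as_signed = lambda v: v - 2**256 if v >= 2**255 else v
    res = wrap256(x - y)
    if signed:
        return (as_signed(y) < 0) == (as_signed(res) > as_signed(x))
    # mirrors ["le", res, x]
    return res <= x

assert _model_safe_sub_ok(5, 3, signed=False)
assert not _model_safe_sub_ok(0, 1, signed=False)  # 0 - 1 wraps; caught
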
def keccak256_helper(expr, ir_arg, context):
    sub = ir_arg  # TODO get rid of this useless variable
    _check_byteslike(sub.typ, expr)

    # Can hash literals
    # TODO this is dead code.
    if isinstance(sub, bytes):
        return IRnode.from_list(bytes_to_int(keccak256(sub)), typ=BaseType("bytes32"))

    # Can hash bytes32 objects
    if is_base_type(sub.typ, "bytes32"):
        return IRnode.from_list(
            [
                "seq",
                ["mstore", MemoryPositions.FREE_VAR_SPACE, sub],
                ["sha3", MemoryPositions.FREE_VAR_SPACE, 32],
            ],
            typ=BaseType("bytes32"),
            add_gas_estimate=_gas_bound(1),
        )

    sub = ensure_in_memory(sub, context)

    return IRnode.from_list(
        ["with", "_buf", sub, ["sha3", ["add", "_buf", 32], ["mload", "_buf"]]],
        typ=BaseType("bytes32"),
        annotation="keccak256",
        add_gas_estimate=_gas_bound(ceil(sub.typ.maxlen / 32)),
    )

def parse_Name(self):
    if self.expr.id == "self":
        return IRnode.from_list(["address"], typ="address")
    elif self.expr.id in self.context.vars:
        var = self.context.vars[self.expr.id]
        return IRnode.from_list(
            var.pos,
            typ=var.typ,
            location=var.location,  # either 'memory' or 'calldata'; storage is handled above
            encoding=var.encoding,
            annotation=self.expr.id,
            mutable=var.mutable,
        )
    elif self.expr._metadata["type"].is_immutable:
        var = self.context.globals[self.expr.id]
        ofst = self.expr._metadata["type"].position.offset

        if self.context.sig.is_init_func:
            mutable = True
            location = IMMUTABLES
        else:
            mutable = False
            location = DATA

        return IRnode.from_list(
            ofst, typ=var.typ, location=location, annotation=self.expr.id, mutable=mutable
        )

def safe_pow(x, y):
    num_info = x.typ._num_info
    if not is_integer_type(x.typ):
        # type checker should have caught this
        raise TypeCheckFailure("non-integer pow")

    if x.is_literal:
        # cannot pass 1 or 0 to `calculate_largest_power`
        if x.value == 1:
            return IRnode.from_list([1])
        if x.value == 0:
            return IRnode.from_list(["iszero", y])

        upper_bound = calculate_largest_power(x.value, num_info.bits, num_info.is_signed) + 1
        # for signed integers, this also prevents negative values
        ok = ["lt", y, upper_bound]

    elif y.is_literal:
        upper_bound = calculate_largest_base(y.value, num_info.bits, num_info.is_signed) + 1
        if num_info.is_signed:
            ok = ["and", ["slt", x, upper_bound], ["sgt", x, -upper_bound]]
        else:
            ok = ["lt", x, upper_bound]
    else:
        # `a ** b` where neither `a` nor `b` is known
        # TODO this is currently unreachable; once we implement a way to do it
        # safely, remove the check in `vyper/context/types/value/numeric.py`
        return

    return IRnode.from_list(["seq", ["assert", ok], ["exp", x, y]])

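# Hedged sketch of what `calculate_largest_power` is assumed to compute: the
# largest exponent b such that base**b still fits in the given integer type.
# Brute-force model for illustration only -- not the compiler's algorithm --
# and it only handles bases >= 2 (mirroring the 0/1 guard above).
def _largest_power_model(base, bits, is_signed):
    assert base >= 2
    hi = 2 ** (bits - 1) - 1 if is_signed else 2**bits - 1
    b = 1
    while base ** (b + 1) <= hi:
        b += 1
    return b

# for uint8 (max 255): 2**7 == 128 fits but 2**8 == 256 does not
assert _largest_power_model(2, 8, False) == 7
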
def finalize(fill_return_buffer):
    fill_return_buffer = IRnode.from_list(
        fill_return_buffer, annotation=f"fill return buffer {sig._ir_identifier}"
    )
    cleanup_loops = "cleanup_repeat" if context.forvars else "seq"
    # NOTE: because stack analysis is incomplete, cleanup_repeat must
    # come after fill_return_buffer, otherwise the stack will break
    return IRnode.from_list(["seq", fill_return_buffer, cleanup_loops, jump_to_exit])

def _mul(x, y):
    x, y = IRnode.from_list(x), IRnode.from_list(y)
    # NOTE: similar deal: duplicate with optimizer rule
    if isinstance(x.value, int) and isinstance(y.value, int):
        ret = x.value * y.value
    else:
        ret = ["mul", x, y]
    return IRnode.from_list(ret)

def unwrap_location(orig):
    if orig.location is not None:
        return IRnode.from_list(LOAD(orig), typ=orig.typ)
    else:
        # CMC 2022-03-24 TODO refactor so this branch can be removed
        if orig.value == "~empty":
            return IRnode.from_list(0, typ=orig.typ)
        return orig

def parse_List(self):
    typ = new_type_to_old_type(self.expr._metadata["type"])
    if len(self.expr.elements) == 0:
        return IRnode.from_list("~empty", typ=typ)

    multi_ir = [Expr(x, self.context).ir_node for x in self.expr.elements]

    return IRnode.from_list(["multi"] + multi_ir, typ=typ)

def generate_ir_for_module(global_ctx: GlobalContext) -> Tuple[IRnode, IRnode, FunctionSignatures]:
    # order functions so that each function comes after all of its callees
    function_defs = _topsort(global_ctx._function_defs)

    # FunctionSignatures for all interfaces defined in this module
    all_sigs: Dict[str, FunctionSignatures] = {}
    if global_ctx._contracts or global_ctx._interfaces:
        all_sigs = parse_external_interfaces(all_sigs, global_ctx)

    init_function: Optional[vy_ast.FunctionDef] = None

    sigs: FunctionSignatures = {}

    # generate all signatures
    # TODO really this should live in GlobalContext
    for f in function_defs:
        sig = FunctionSignature.from_definition(f, global_ctx)

        # add it to the global namespace.
        sigs[sig.name] = sig

        # a little hacky, eventually FunctionSignature should be
        # merged with ContractFunction and we can remove this.
        f._metadata["signature"] = sig

    assert "self" not in all_sigs
    all_sigs["self"] = sigs

    runtime_functions = [f for f in function_defs if not _is_init_func(f)]
    init_function = next((f for f in function_defs if _is_init_func(f)), None)

    runtime, internal_functions = _runtime_ir(runtime_functions, all_sigs, global_ctx)

    deploy_code: List[Any] = ["seq"]
    immutables_len = global_ctx.immutable_section_bytes
    if init_function:
        init_func_ir = generate_ir_for_function(init_function, all_sigs, global_ctx, False)
        deploy_code.append(init_func_ir)

        # pass the amount of memory allocated for the init function
        # so that deployment does not clobber it while preparing immutables
        # note: (deploy mem_ofst, code, extra_padding)
        init_mem_used = init_function._metadata["signature"].frame_info.mem_used
        deploy_code.append(["deploy", init_mem_used, runtime, immutables_len])

        # internal functions come after everything else
        for f in init_function._metadata["type"].called_functions:
            deploy_code.append(internal_functions[f.name])

    else:
        if immutables_len != 0:
            raise CompilerPanic("unreachable")
        deploy_code.append(["deploy", 0, runtime, 0])

    return IRnode.from_list(deploy_code), IRnode.from_list(runtime), sigs

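# Hedged sketch of the callee-first ordering `_topsort` is assumed to produce
# (illustrative only, not the compiler's implementation): a post-order DFS
# over the call graph emits each function after everything it calls.
def _toposort_model(call_graph):
    # call_graph: dict mapping function name -> list of callee names
    seen, order = set(), []

    def visit(f):
        if f in seen:
            return
        seen.add(f)
        for callee in call_graph[f]:
            visit(callee)
        order.append(f)

    for f in call_graph:
        visit(f)
    return order

assert _toposort_model({"foo": ["bar"], "bar": []}) == ["bar", "foo"]
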
def safe_div(x, y):
    num_info = x.typ._num_info
    typ = x.typ

    ok = [1]  # true

    if is_decimal_type(x.typ):
        lo, hi = num_info.bounds
        if max(abs(lo), abs(hi)) * num_info.divisor > 2**256 - 1:
            # stub to prevent us from adding fixed point numbers we don't know
            # how to deal with
            raise UnimplementedException(f"safe_div for decimal{num_info.bits}x{num_info.decimals}")
        x = ["mul", x, num_info.divisor]

    DIV = "sdiv" if num_info.is_signed else "div"
    res = IRnode.from_list([DIV, x, clamp("gt", y, 0)], typ=typ)
    with res.cache_when_complex("res") as (b1, res):
        # TODO: refactor this condition / push some things into the optimizer
        if num_info.is_signed and num_info.bits == 256:
            if version_check(begin="constantinople"):
                upper_bound = ["shl", 255, 1]
            else:
                upper_bound = -(2**255)

            if not x.is_literal and not y.is_literal:
                ok = ["or", ["ne", y, ["not", 0]], ["ne", x, upper_bound]]
            # TODO push these rules into the optimizer
            elif x.is_literal and x.value == -(2**255):
                ok = ["ne", y, ["not", 0]]
            elif y.is_literal and y.value == -1:
                ok = ["ne", x, upper_bound]
            else:
                # x or y is a literal, and not an evil value.
                pass

        elif num_info.is_signed and is_integer_type(typ):
            lo, hi = num_info.bounds
            # we need to throw on min_value(typ) / -1,
            # but we can skip if one of the operands is a literal and not
            # the evil value
            can_skip_clamp = (x.is_literal and x.value != lo) or (y.is_literal and y.value != -1)
            if not can_skip_clamp:
                # clamp_basetype has fewer ops than the int256 rule.
                res = clamp_basetype(res)

        elif is_decimal_type(typ):
            # always clamp decimals, since decimal division can actually
            # result in something larger than either operand (e.g. 1.0 / 0.1)
            # TODO maybe use safe_mul
            res = clamp_basetype(res)

        check = IRnode.from_list(["assert", ok], error_msg="safediv")
        return IRnode.from_list(b1.resolve(["seq", check, res]))

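# Worked example of the int256 edge case guarded above: in two's complement,
# min_value(int256) / -1 is the one quotient that does not fit, since the
# mathematical result 2**255 exceeds max_value(int256) == 2**255 - 1.
lo = -(2**255)
assert -lo == 2**255        # true quotient of lo / -1
assert -lo > 2**255 - 1     # ...which overflows int256, hence the check
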
def _get_element_ptr_tuplelike(parent, key):
    typ = parent.typ
    assert isinstance(typ, TupleLike)

    if isinstance(typ, StructType):
        assert isinstance(key, str)
        subtype = typ.members[key]
        attrs = list(typ.tuple_keys())
        index = attrs.index(key)
        annotation = key
    else:
        assert isinstance(key, int)
        subtype = typ.members[key]
        attrs = list(range(len(typ.members)))
        index = key
        annotation = None

    # generated by empty() + make_setter
    if parent.value == "~empty":
        return IRnode.from_list("~empty", typ=subtype)

    if parent.value == "multi":
        assert parent.encoding != Encoding.ABI, "no abi-encoded literals"
        return parent.args[index]

    ofst = 0  # offset from parent start

    if parent.encoding == Encoding.ABI:
        if parent.location == STORAGE:
            raise CompilerPanic("storage variables should not be abi encoded")  # pragma: notest

        member_t = typ.members[attrs[index]]

        for i in range(index):
            member_abi_t = typ.members[attrs[i]].abi_type
            ofst += member_abi_t.embedded_static_size()

        return _getelemptr_abi_helper(parent, member_t, ofst)

    if parent.location.word_addressable:
        for i in range(index):
            ofst += typ.members[attrs[i]].storage_size_in_words
    elif parent.location.byte_addressable:
        for i in range(index):
            ofst += typ.members[attrs[i]].memory_bytes_required
    else:
        raise CompilerPanic(f"bad location {parent.location}")  # pragma: notest

    return IRnode.from_list(
        add_ofst(parent, ofst),
        typ=subtype,
        location=parent.location,
        encoding=parent.encoding,
        annotation=annotation,
    )

def parse_Int(self):
    # Literal (most likely) becomes int256
    if self.expr.n < 0:
        return IRnode.from_list(self.expr.n, typ=BaseType("int256", is_literal=True))
    # Literal is large enough that it (most likely) becomes uint256.
    else:
        return IRnode.from_list(self.expr.n, typ=BaseType("uint256", is_literal=True))

def add_ofst(ptr, ofst):
    ofst = IRnode.from_list(ofst)
    if isinstance(ptr.value, int) and isinstance(ofst.value, int):
        # NOTE: duplicate with optimizer rule (but removing this makes a
        # test on --no-optimize mode use too much gas)
        ret = ptr.value + ofst.value
    else:
        ret = ["add", ptr, ofst]
    return IRnode.from_list(ret, location=ptr.location, encoding=ptr.encoding)

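# Minimal model of the constant-folding fast path above (illustrative only;
# the real IRnode class is not used here): fold when both values are ints,
# otherwise emit the symbolic ["add", ...] form.
def _model_add_ofst(ptr_value, ofst_value):
    if isinstance(ptr_value, int) and isinstance(ofst_value, int):
        return ptr_value + ofst_value
    return ["add", ptr_value, ofst_value]

assert _model_add_ofst(64, 32) == 96
assert _model_add_ofst("frame_ptr", 32) == ["add", "frame_ptr", 32]
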
def test_ir_optimizer(ir):
    optimized = optimizer.optimize(IRnode.from_list(ir[0]))
    optimized.repr_show_gas = True
    if ir[1] is None:
        # no-op, assert optimizer does nothing
        expected = IRnode.from_list(ir[0])
    else:
        expected = IRnode.from_list(ir[1])
    expected.repr_show_gas = True
    optimized.annotation = None
    assert optimized == expected

def _encode_dyn_array_helper(dst, ir_node, context):
    # if it's a literal, first serialize to memory as we
    # don't have a compile-time abi encoder
    # TODO handle this upstream somewhere
    if ir_node.value == "multi":
        buf = context.new_internal_variable(dst.typ)
        buf = IRnode.from_list(buf, typ=dst.typ, location=MEMORY)
        _bufsz = dst.typ.abi_type.size_bound()
        return [
            "seq",
            make_setter(buf, ir_node),
            ["set", "dyn_ofst", abi_encode(dst, buf, context, _bufsz, returns_len=True)],
        ]

    subtyp = ir_node.typ.subtype
    child_abi_t = subtyp.abi_type

    ret = ["seq"]

    len_ = get_dyn_array_count(ir_node)
    with len_.cache_when_complex("len") as (b, len_):
        # set the length word
        ret.append(STORE(dst, len_))

        # prepare the loop
        t = BaseType("uint256")
        i = IRnode.from_list(context.fresh_varname("ix"), typ=t)

        # offset of the i'th element in ir_node
        child_location = get_element_ptr(ir_node, i, array_bounds_check=False)

        # offset of the i'th element in dst
        dst = add_ofst(dst, 32)  # jump past length word
        static_elem_size = child_abi_t.embedded_static_size()
        static_ofst = ["mul", i, static_elem_size]
        loop_body = _encode_child_helper(dst, child_location, static_ofst, "dyn_child_ofst", context)
        loop = ["repeat", i, 0, len_, ir_node.typ.count, loop_body]

        x = ["seq", loop, "dyn_child_ofst"]
        start_dyn_ofst = ["mul", len_, static_elem_size]
        run_children = ["with", "dyn_child_ofst", start_dyn_ofst, x]
        new_dyn_ofst = ["add", "dyn_ofst", run_children]
        # size of dynarray is size of encoded children + size of the length word
        # TODO optimize by adding 32 to the initial value of dyn_ofst
        new_dyn_ofst = ["add", 32, new_dyn_ofst]
        ret.append(["set", "dyn_ofst", new_dyn_ofst])

        return b.resolve(ret)

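# Illustrative model of the offset arithmetic above (not compiler code): the
# i'th child's static slot starts at i * embedded_static_size, and the
# children's dynamic section starts where the static section ends, at
# len * embedded_static_size. The concrete values below are hypothetical.
static_elem_size = 32  # e.g. each child contributes one 32-byte head word
length = 3
assert [i * static_elem_size for i in range(length)] == [0, 32, 64]
assert length * static_elem_size == 96  # start_dyn_ofst in the code above
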
def get_dyn_array_count(arg):
    assert isinstance(arg.typ, DArrayType)

    typ = BaseType("uint256")

    if arg.value == "multi":
        return IRnode.from_list(len(arg.args), typ=typ)

    if arg.value == "~empty":
        # empty(DynArray[...])
        return IRnode.from_list(0, typ=typ)

    return IRnode.from_list(LOAD(arg), typ=typ)

def ensure_in_memory(ir_var, context):
    """Ensure a variable is in memory. This is useful for functions
    which expect to operate on memory variables.
    """
    if ir_var.location == MEMORY:
        return ir_var

    typ = ir_var.typ
    buf = IRnode.from_list(context.new_internal_variable(typ), typ=typ, location=MEMORY)
    do_copy = make_setter(buf, ir_var)

    return IRnode.from_list(["seq", do_copy, buf], typ=typ, location=MEMORY)

def parse_UnaryOp(self):
    operand = Expr.parse_value_expr(self.expr.operand, self.context)
    if isinstance(self.expr.op, vy_ast.Not):
        if isinstance(operand.typ, BaseType) and operand.typ.typ == "bool":
            return IRnode.from_list(["iszero", operand], typ="bool")
    elif isinstance(self.expr.op, vy_ast.USub) and is_numeric_type(operand.typ):
        assert operand.typ._num_info.is_signed
        # Clamp on minimum integer value as we cannot negate that value
        # (all other integer values are fine)
        min_int_val, _ = operand.typ._num_info.bounds
        return IRnode.from_list(["sub", 0, ["clampgt", operand, min_int_val]], typ=operand.typ)

def _external_call_helper(contract_address, args_ir, call_kwargs, call_expr, context):
    # expr.func._metadata["type"].return_type is more accurate
    # than fn_sig.return_type in the case of JSON interfaces.
    fn_type = call_expr.func._metadata["type"]

    # sanity check
    assert fn_type.min_arg_count <= len(args_ir) <= fn_type.max_arg_count

    ret = ["seq"]

    # this is a sanity check to prevent double evaluation of the external call
    # in the codegen pipeline. if the external call gets doubly evaluated,
    # a duplicate label exception will get thrown during assembly.
    ret.append(eval_once_check(_freshname(call_expr.node_source_code)))

    buf, arg_packer, args_ofst, args_len = _pack_arguments(fn_type, args_ir, context)

    ret_unpacker, ret_ofst, ret_len = _unpack_returndata(
        buf, fn_type, call_kwargs, contract_address, context, call_expr
    )

    ret += arg_packer

    if fn_type.return_type is None and not call_kwargs.skip_contract_check:
        # if we do not expect return data, check that a contract exists at the
        # target address. we must perform this check BEFORE the call because
        # the contract might selfdestruct. on the other hand we can omit this
        # when we _do_ expect return data because we later check
        # `returndatasize` (that check works even if the contract
        # selfdestructs).
        ret.append(_extcodesize_check(contract_address))

    gas = call_kwargs.gas
    value = call_kwargs.value

    use_staticcall = fn_type.mutability in (StateMutability.VIEW, StateMutability.PURE)
    if context.is_constant():
        assert use_staticcall, "typechecker missed this"

    if use_staticcall:
        call_op = ["staticcall", gas, contract_address, args_ofst, args_len, buf, ret_len]
    else:
        call_op = ["call", gas, contract_address, value, args_ofst, args_len, buf, ret_len]

    ret.append(check_external_call(call_op))

    return_t = None
    if fn_type.return_type is not None:
        return_t = new_type_to_old_type(fn_type.return_type)
        ret.append(ret_unpacker)

    return IRnode.from_list(ret, typ=return_t, location=MEMORY)

def LOAD(ptr: IRnode) -> IRnode:
    if ptr.location is None:
        raise CompilerPanic("cannot dereference non-pointer type")
    op = ptr.location.load_op
    if op is None:
        raise CompilerPanic(f"unreachable {ptr.location}")  # pragma: notest
    return IRnode.from_list([op, ptr])

def _rewrite_return_sequences(ir_node, label_params=None):
    args = ir_node.args

    if ir_node.value == "return":
        if args[0].value == "ret_ofst" and args[1].value == "ret_len":
            ir_node.args[0].value = "pass"
            ir_node.args[1].value = "pass"

    if ir_node.value == "exit_to":
        # handle exit from private function
        if args[0].value == "return_pc":
            ir_node.value = "jump"
            args[0].value = "pass"
        else:
            # handle jump to cleanup
            assert is_symbol(args[0].value)

            ir_node.value = "seq"

            _t = ["seq"]
            if "return_buffer" in label_params:
                _t.append(["pop", "pass"])

            dest = args[0].value[5:]  # `_sym_foo` -> `foo`
            more_args = ["pass" if t.value == "return_pc" else t for t in args[1:]]
            _t.append(["goto", dest] + more_args)

            ir_node.args = IRnode.from_list(_t, source_pos=ir_node.source_pos).args

    if ir_node.value == "label":
        label_params = set(t.value for t in ir_node.args[1].args)

    for t in args:
        _rewrite_return_sequences(t, label_params)

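# Quick check of the symbol-stripping convention above: label symbols are
# assumed to carry a 5-character `_sym_` prefix, so slicing with [5:]
# recovers the bare label name.
assert len("_sym_") == 5
assert "_sym_foo"[5:] == "foo"
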
def eval_once_check(name):
    # an IRnode which enforces uniqueness. include with a side-effecting
    # operation to sanity check that the codegen pipeline only generates
    # the side-effecting operation once (otherwise, IR-to-assembly will
    # throw a duplicate label exception). there is no runtime overhead
    # since the jumpdest gets optimized out in the final stage of assembly.
    return IRnode.from_list(["unique_symbol", name])

def clamp_basetype(ir_node):
    t = ir_node.typ
    if not isinstance(t, BaseType):
        raise CompilerPanic(f"{t} passed to clamp_basetype")  # pragma: notest

    # copy of the input
    ir_node = unwrap_location(ir_node)

    if isinstance(t, EnumType):
        bits = len(t.members)
        # assert x >> bits == 0
        ret = int_clamp(ir_node, bits, signed=False)
    elif is_integer_type(t) or is_decimal_type(t):
        if t._num_info.bits == 256:
            ret = ir_node
        else:
            ret = int_clamp(ir_node, t._num_info.bits, signed=t._num_info.is_signed)
    elif is_bytes_m_type(t):
        if t._bytes_info.m == 32:
            ret = ir_node  # special case, no clamp.
        else:
            ret = bytes_clamp(ir_node, t._bytes_info.m)
    elif t.typ in ("address",):
        ret = int_clamp(ir_node, 160)
    elif t.typ in ("bool",):
        ret = int_clamp(ir_node, 1)
    else:  # pragma: nocover
        raise CompilerPanic(f"{t} passed to clamp_basetype")

    return IRnode.from_list(ret, typ=ir_node.typ)

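# Hedged model of what int_clamp is assumed to check for unsigned values
# (see the "assert x >> bits == 0" comment above): every bit at position
# `bits` and higher must be zero. Pure-Python illustration only.
def _model_unsigned_clamp_ok(x, bits):
    return (x >> bits) == 0

assert _model_unsigned_clamp_ok(0b101, 3)            # 0b101 fits in 3 bits
assert not _model_unsigned_clamp_ok(1 << 160, 160)   # too wide for an address
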