Example #1
def _pack_arguments(contract_sig, args, context, pos):
    # abi encoding just treats all args as a big tuple
    args_tuple_t = TupleType([x.typ for x in args])
    args_as_tuple = LLLnode.from_list(["multi"] + list(args), typ=args_tuple_t)
    args_abi_t = args_tuple_t.abi_type

    # sanity typecheck - make sure the arguments can be assigned
    dst_tuple_t = TupleType([arg.typ for arg in contract_sig.args[:len(args)]])
    check_assign(dummy_node_for_type(dst_tuple_t), args_as_tuple)

    if contract_sig.return_type is not None:
        return_abi_t = calculate_type_for_external_return(
            contract_sig.return_type).abi_type

        # we use the same buffer for args and returndata,
        # so allocate enough space here for the returndata too.
        buflen = max(args_abi_t.size_bound(), return_abi_t.size_bound())
    else:
        buflen = args_abi_t.size_bound()

    buflen += 32  # padding for the method id

    buf_t = get_type_for_exact_size(buflen)
    buf = context.new_internal_variable(buf_t)

    args_ofst = buf + 28
    args_len = args_abi_t.size_bound() + 4

    abi_signature = contract_sig.name + dst_tuple_t.abi_type.selector_name()

    # layout:
    # 32 bytes                 | args
    # 0x..00<method_id_4bytes> | args
    # the reason for the left padding is just so the alignment is easier.
    # if we were only targeting constantinople, we could align
    # to buf (and also keep code size small) by using
    # (mstore buf (shl 224 signature.method_id))
    mstore_method_id = [["mstore", buf, util.abi_method_id(abi_signature)]]

    if len(args) == 0:
        encode_args = ["pass"]
    else:
        encode_args = abi_encode(buf + 32,
                                 args_as_tuple,
                                 context,
                                 pos,
                                 bufsz=buflen)

    return buf, mstore_method_id + [encode_args], args_ofst, args_len
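
For intuition, here is a minimal standalone sketch (not compiler code) of the buffer layout that _pack_arguments builds and why args_ofst is buf + 28. It assumes the well-known selector 0xa9059cbb for "transfer(address,uint256)"; the argument values are invented:

METHOD_ID = 0xA9059CBB  # selector for "transfer(address,uint256)"

buf = bytearray(32 + 64)                   # method-id word + two 32-byte args
buf[0:32] = METHOD_ID.to_bytes(32, "big")  # left-padded: 0x00..00a9059cbb
buf[32:64] = (0xABCD).to_bytes(32, "big")  # arg 0 (toy value)
buf[64:96] = (1000).to_bytes(32, "big")    # arg 1 (toy value)

# calldata starts 28 bytes into the buffer, so the selector's 4 bytes come first
calldata = bytes(buf[28:])
assert calldata[:4] == bytes.fromhex("a9059cbb")
assert len(calldata) == 4 + 64             # args_len = args size_bound + 4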
Example #2
    def parse_Call(self):
        # TODO use expr.func.type.is_internal once type annotations
        # are consistently available.
        is_self_function = (
            isinstance(self.stmt.func, vy_ast.Attribute)
            and isinstance(self.stmt.func.value, vy_ast.Name)
            and self.stmt.func.value.id == "self"
        )

        if isinstance(self.stmt.func, vy_ast.Name):
            funcname = self.stmt.func.id
            return STMT_DISPATCH_TABLE[funcname].build_IR(
                self.stmt, self.context)

        elif isinstance(self.stmt.func, vy_ast.Attribute) and self.stmt.func.attr in (
            "append",
            "pop",
        ):
            # TODO: consider moving this to builtins
            darray = Expr(self.stmt.func.value, self.context).ir_node
            args = [Expr(x, self.context).ir_node for x in self.stmt.args]
            if self.stmt.func.attr == "append":
                # sanity checks
                assert len(args) == 1
                arg = args[0]
                assert isinstance(darray.typ, DArrayType)
                check_assign(dummy_node_for_type(darray.typ.subtype),
                             dummy_node_for_type(arg.typ))

                return append_dyn_array(darray, arg)
            else:
                assert len(args) == 0
                return pop_dyn_array(darray, return_popped_item=False)

        elif is_self_function:
            return self_call.ir_for_self_call(self.stmt, self.context)
        else:
            return external_call.ir_for_external_call(self.stmt, self.context)
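
The same dispatch pattern can be shown standalone with Python's own ast module, whose Call/Attribute/Name nodes mirror vy_ast closely. This is a hedged sketch; classify_call and its category labels are invented for illustration:

import ast

def classify_call(src: str) -> str:
    func = ast.parse(src).body[0].value.func
    if isinstance(func, ast.Name):
        return "builtin statement"        # e.g. raw_call(...)
    if isinstance(func, ast.Attribute) and func.attr in ("append", "pop"):
        return "dynamic array method"     # e.g. self.xs.append(1)
    if (isinstance(func, ast.Attribute)
            and isinstance(func.value, ast.Name)
            and func.value.id == "self"):
        return "internal self-call"       # e.g. self.foo()
    return "external call"                # e.g. self.token.transfer(a, b)

assert classify_call("raw_call(a, b)") == "builtin statement"
assert classify_call("self.xs.append(1)") == "dynamic array method"
assert classify_call("self.foo()") == "internal self-call"
assert classify_call("self.token.transfer(a, b)") == "external call"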
Example #3
def make_return_stmt(lll_val: LLLnode, stmt: Any,
                     context: Context) -> Optional[LLLnode]:

    sig = context.sig

    jump_to_exit = ["exit_to", f"_sym_{sig.exit_sequence_label}"]

    _pos = getpos(stmt)

    if context.return_type is None:
        if stmt.value is not None:
            return None  # triggers an exception

    else:
        # sanity typecheck
        check_assign(dummy_node_for_type(context.return_type), lll_val)

    # helper function
    def finalize(fill_return_buffer):
        # do NOT bypass this. jump_to_exit may do important function cleanup.
        fill_return_buffer = LLLnode.from_list(
            fill_return_buffer,
            annotation=f"fill return buffer {sig._lll_identifier}")
        cleanup_loops = "cleanup_repeat" if context.forvars else "pass"
        # NOTE: because stack analysis is incomplete, cleanup_repeat must
        # come after fill_return_buffer otherwise the stack will break
        return LLLnode.from_list(
            ["seq", fill_return_buffer, cleanup_loops, jump_to_exit],
            pos=_pos,
        )

    if context.return_type is None:
        jump_to_exit += ["return_pc"]
        return finalize(["pass"])

    if context.is_internal:
        dst = LLLnode.from_list(["return_buffer"],
                                typ=context.return_type,
                                location="memory")
        fill_return_buffer = make_setter(dst, lll_val, pos=_pos)
        jump_to_exit += ["return_pc"]

        return finalize(fill_return_buffer)

    else:  # return from external function

        lll_val = wrap_value_for_external_return(lll_val)

        external_return_type = calculate_type_for_external_return(
            context.return_type)
        maxlen = external_return_type.abi_type.size_bound()
        return_buffer_ofst = context.new_internal_variable(
            get_type_for_exact_size(maxlen))

        # encode_out is a sequence which both performs the abi-encoding
        # and leaves the length of the output as a stack element
        encode_out = abi_encode(return_buffer_ofst,
                                lll_val,
                                context,
                                pos=_pos,
                                returns_len=True,
                                bufsz=maxlen)

        # previously we would fill the return buffer and push the location and
        # length onto the stack inside the `seq_unchecked`, leaving them for
        # the function cleanup routine, which expects return_ofst and
        # return_len to be on the stack.
        # CMC introduced `goto` with args, which lets us replace
        # `seq_unchecked` with `seq` and simply append the cleanup arguments
        # to the `jump_to_exit` list.
        # see vyper/codegen/self_call.py for an example.
        jump_to_exit += [return_buffer_ofst, encode_out]  # type: ignore

        return finalize(["pass"])
Example #4
def make_return_stmt(ir_val: IRnode, stmt: Any, context: Context) -> Optional[IRnode]:

    sig = context.sig

    jump_to_exit = ["exit_to", f"_sym_{sig.exit_sequence_label}"]

    if context.return_type is None:
        if stmt.value is not None:
            return None  # triggers an exception

    else:
        # sanity typecheck
        check_assign(dummy_node_for_type(context.return_type), ir_val)

    # helper function
    def finalize(fill_return_buffer):
        # do NOT bypass this. jump_to_exit may do important function cleanup.
        fill_return_buffer = IRnode.from_list(
            fill_return_buffer, annotation=f"fill return buffer {sig._ir_identifier}"
        )
        cleanup_loops = "cleanup_repeat" if context.forvars else "pass"
        # NOTE: because stack analysis is incomplete, cleanup_repeat must
        # come after fill_return_buffer otherwise the stack will break
        return IRnode.from_list(["seq", fill_return_buffer, cleanup_loops, jump_to_exit])

    if context.return_type is None:
        jump_to_exit += ["return_pc"]
        return finalize(["pass"])

    if context.is_internal:
        dst = IRnode.from_list(["return_buffer"], typ=context.return_type, location=MEMORY)
        fill_return_buffer = make_setter(dst, ir_val)
        jump_to_exit += ["return_pc"]

        return finalize(fill_return_buffer)

    else:  # return from external function

        external_return_type = calculate_type_for_external_return(context.return_type)
        maxlen = external_return_type.abi_type.size_bound()

        # optimize: if the value already happens to be ABI encoded in
        # memory, don't bother running abi_encode, just return the
        # buffer it is in.
        can_skip_encode = (
            abi_encoding_matches_vyper(ir_val.typ)
            and ir_val.location == MEMORY
            # ensure it has already been validated - could be
            # unvalidated ABI encoded returndata for example
            and not needs_clamp(ir_val.typ, ir_val.encoding)
        )

        if can_skip_encode:
            assert ir_val.typ.memory_bytes_required == maxlen  # type: ignore
            jump_to_exit += [ir_val, maxlen]  # type: ignore
            return finalize(["pass"])

        ir_val = wrap_value_for_external_return(ir_val)

        # general case: abi_encode the data to a newly allocated buffer
        # and return the buffer
        return_buffer_ofst = context.new_internal_variable(get_type_for_exact_size(maxlen))

        # return_len is a sequence which both performs the abi-encoding
        # and leaves the length of the output as a stack element
        return_len = abi_encode(return_buffer_ofst, ir_val, context, returns_len=True, bufsz=maxlen)

        # append ofst and len to exit_to the cleanup subroutine
        jump_to_exit += [return_buffer_ofst, return_len]  # type: ignore

        return finalize(["pass"])