Example #1
File: core.py Project: rollmind/llvmlite
 def double():
     return ir.DoubleType()
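For context, a minimal self-contained sketch (module and function names here are illustrative, not from the project above) of how a helper returning ir.DoubleType() is typically consumed when emitting IR with llvmlite:

from llvmlite import ir

# Build: double square(double x) { return x * x; }
module = ir.Module(name="sketch")
fnty = ir.FunctionType(ir.DoubleType(), (ir.DoubleType(),))
func = ir.Function(module, fnty, name="square")
builder = ir.IRBuilder(func.append_basic_block("entry"))
x, = func.args
builder.ret(builder.fmul(x, x, name="x_squared"))
print(module)  # prints the textual LLVM IR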
Example #2
]

import operator
from llvmlite import ir
from numba.core import datamodel, extending, types, imputils, typing, cgutils, typeconv
""" TODO: use local registries, currently blocked by overloading
operator.getitem that should use rbc pipeline class.  """
typing_registry = typing.templates.builtin_registry  # TODO: Registry()
lowering_registry = imputils.builtin_registry  # TODO: Registry()

int8_t = ir.IntType(8)
int32_t = ir.IntType(32)
int64_t = ir.IntType(64)
void_t = ir.VoidType()
fp32 = ir.FloatType()
fp64 = ir.DoubleType()


class StructureTypeAttributeTemplate(typing.templates.AttributeTemplate):
    key = NotImplemented

    def generic_resolve(self, typ, attr):
        model = datamodel.default_manager.lookup(typ)
        return model.get_member_fe_type(attr)


class StructurePointerTypeAttributeTemplate(
        typing.templates.AttributeTemplate):
    key = NotImplemented

    def generic_resolve(self, typ, attr):
Example #3
 def type() -> ir.Type:
     return ir.DoubleType()
Example #4
                                    impl_ret_new_ref)
from numba.typing import signature
from numba import _helperlib, cgutils, types, jit

registry = Registry()
lower = registry.lower

int32_t = ir.IntType(32)
int64_t = ir.IntType(64)


def const_int(x):
    return ir.Constant(int32_t, x)


double = ir.DoubleType()

N = 624
N_const = ir.Constant(int32_t, N)

_pid = None


def random_init():
    """
    Initialize the random states with system entropy.
    """
    global _pid
    if _pid != os.getpid():
        b = os.urandom(N * 4)
        for n in ('py_random_state', 'np_random_state'):
Example #5
from enum import Enum
from ctypes import c_float, c_int
from typing import List

from llvmlite import ir
from llvmlite.ir import builder


class DType(Enum):
    Int = 1
    Float = 2
    Double = 3
    Complx = 4
    DComplx = 5


type_map_llvm = {
    DType.Int: ir.IntType(32),
    DType.Float: ir.FloatType(),
    DType.Double: ir.DoubleType(),
    DType.Complx: ir.FloatType(),
    DType.DComplx: ir.DoubleType()
}

int_type = ir.IntType(32)
float_type = ir.FloatType()
double_type = ir.DoubleType()
void_type = ir.VoidType()
ll_ptr_float = ir.PointerType(float_type)
ll_ptr_double = ir.PointerType(double_type)
ll_ptr_int = ir.PointerType(int_type)

map_kk_ct = {
    DType.Int: (c_int, ll_ptr_int),
    DType.Float: (c_float, ll_ptr_float),
Example #6
import ctypes
from slr import SLRParser
from AST import asts, AST, flattenLists
from varClasses import NamedType, FunType
from semantic import errorCheck
from symTable import SymbolTable, NamedSymbolTable

from llvmlite import ir, binding
from ctypes import CFUNCTYPE, c_double, c_int

import llvmlite

b = binding.create_context()

i64 = ir.IntType(64)
fp = ir.DoubleType()
boolean = ir.IntType(1)
unit = 'll_unit'

module = ir.Module(name='Default')
fnty = ir.FunctionType(ir.VoidType(), ())
ext_func = ir.Function(module, fnty, name='test')
fnty = ir.FunctionType(i64, (i64,))
runtime_my_malloc = ir.Function(module, fnty, name='My_Malloc')
fnty = ir.FunctionType(i64, (i64,))
runtime_print_int = ir.Function(module, fnty, name='Print_int')
t_data = binding.create_target_data('e-m:o-i64:64-f80:128-n8:16:32:64-S128')

def gen_unit(t):
    return t(None)
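For context, a short hedged sketch (reusing module, i64 and runtime_print_int declared above; the caller function itself is illustrative) of how such a runtime declaration is typically called from generated code:

# Emit a tiny caller for the declared runtime function (illustrative only)
caller_ty = ir.FunctionType(ir.VoidType(), ())
caller = ir.Function(module, caller_ty, name='call_print')
builder = ir.IRBuilder(caller.append_basic_block('entry'))
builder.call(runtime_print_int, [ir.Constant(i64, 42)])
builder.ret_void()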
Example #7
from ctypes import CFUNCTYPE, c_double
from llvmlite import ir
from textwrap import dedent
import llvmlite.binding as llvm
import parseAST as pAST
import codegen as cgen
import typedefs as td
from typeinfer import apply
import typeinfer as ti
import typeVisitor as tv
import numpy as np

#pointer     = ir.PointerType()
int_type = ir.IntType(32)
float_type = ir.FloatType()
double_type = ir.DoubleType()
void_type = ir.VoidType()
void_ptr = ir.IntType(8).as_pointer()  # generic i8* "void" pointer

##TODO add import here
int32 = td.TCon("Int32")
int64 = td.TCon("Int64")
float32 = td.TCon("Float")
double64 = td.TCon("Double")
void = td.TCon("Void")
array = lambda t: td.TApp(td.TCon("Array"), t)

array_int32 = array(int32)
array_int64 = array(int64)
array_double64 = array(double64)
Example #8
    def _codegen_ForExprAST(self, node):
        # Output this as:
        #   ...
        #   start = startexpr
        #   goto loop
        # loop:
        #   variable = phi [start, loopheader], [nextvariable, loopend]
        #   ...
        #   bodyexpr
        #   ...
        # loopend:
        #   step = stepexpr
        #   nextvariable = variable + step
        #   endcond = endexpr
        #   br endcond, loop, endloop
        # outloop:

        # Emit the start expr first, without the variable in scope.
        start_val = self._codegen(node.start_expr)
        preheader_bb = self.builder.block
        loop_bb = self.builder.function.append_basic_block('loop')

        # Insert an explicit fall through from the current block to loop_bb
        self.builder.branch(loop_bb)
        self.builder.position_at_start(loop_bb)

        # Start the PHI node with an entry for start
        phi = self.builder.phi(ir.DoubleType(), node.id_name)
        phi.add_incoming(start_val, preheader_bb)

        # Within the loop, the variable is defined equal to the PHI node. If it
        # shadows an existing variable, we have to restore it, so save it now.
        oldval = self.func_symtab.get(node.id_name)
        self.func_symtab[node.id_name] = phi

        # Emit the body of the loop. This, like any other expr, can change the
        # current BB. Note that we ignore the value computed by the body.
        body_val = self._codegen(node.body)

        if node.step_expr is None:
            stepval = self.builder.constant(ir.DoubleType(), 1.0)
        else:
            stepval = self._codegen(node.step_expr)
        nextvar = self.builder.fadd(phi, stepval, 'nextvar')

        # Compute the end condition
        endcond = self._codegen(node.end_expr)
        cmp = self.builder.fcmp_ordered(
            '!=', endcond, self.builder.constant(ir.DoubleType(), 0.0),
            'loopcond')

        # Create the 'after loop' block and insert it
        loop_end_bb = self.builder.block
        after_bb = self.builder.function.append_basic_block('afterloop')

        # Insert the conditional branch into the end of loop_end_bb
        self.builder.cbranch(cmp, loop_bb, after_bb)

        # New code will be inserted into after_bb
        self.builder.position_at_start(after_bb)

        # Add a new entry to the PHI node for the backedge
        phi.add_incoming(nextvar, loop_end_bb)

        # Remove the loop variable from the symbol table; if it shadowed an
        # existing variable, restore that.
        if oldval is None:
            del self.func_symtab[node.id_name]
        else:
            self.func_symtab[node.id_name] = oldval

        # The 'for' expression always returns 0
        return self.builder.constant(ir.DoubleType(), 0.0)
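As a companion to the generator above, here is a small standalone llvmlite sketch (function and block names are illustrative) that builds the same phi-based loop shape described in the comment block, so the emitted structure can be inspected directly:

from llvmlite import ir

dbl = ir.DoubleType()
mod = ir.Module(name="loop_sketch")
fn = ir.Function(mod, ir.FunctionType(dbl, ()), name="count_to_ten")
entry = fn.append_basic_block("entry")
loop = fn.append_basic_block("loop")
after = fn.append_basic_block("afterloop")

b = ir.IRBuilder(entry)
start = ir.Constant(dbl, 0.0)
b.branch(loop)                       # explicit fall-through into the loop

b.position_at_start(loop)
phi = b.phi(dbl, "i")                # loop variable lives in a PHI node
phi.add_incoming(start, entry)
nextvar = b.fadd(phi, ir.Constant(dbl, 1.0), "nextvar")
endcond = b.fcmp_ordered("!=", nextvar, ir.Constant(dbl, 10.0), "loopcond")
phi.add_incoming(nextvar, b.block)   # backedge value
b.cbranch(endcond, loop, after)

b.position_at_start(after)
b.ret(ir.Constant(dbl, 0.0))         # the loop expression evaluates to 0.0
print(mod)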
Example #9
from llvmlite import ir

try:
    import numpy as np
except ImportError:
    np = None


#
# Basic IR types
#

void = ir.VoidType()
float32 = ir.FloatType()
float64 = ir.DoubleType()
int8 = ir.IntType(8)
int16 = ir.IntType(16)
int32 = ir.IntType(32)
int64 = ir.IntType(64)

# Pointers
int8p = int8.as_pointer()
int64p = int64.as_pointer()

# Constants
zero = ir.Constant(int64, 0)
one = ir.Constant(int64, 1)
zero32 = ir.Constant(int32, 0)

# Mapping from basic types to IR types
Example #10
from copy import deepcopy
from dataclasses import dataclass
from typing import Dict, Union

from llvmlite import ir

from llvm_utils import options, build_func
from tree import TypeTree

int1 = ir.IntType(1)
int32 = ir.IntType(32)
int64 = ir.IntType(64)
int8 = ir.IntType(8)
pint8 = int8.as_pointer()
# flt32 = ir.FloatType()
flt64 = ir.DoubleType()
char = ir.IntType(8)
unknown = ir.VoidType()


class VolpeType:
    def __repr__(self):
        raise NotImplementedError()

    def unwrap(self) -> ir.Type:
        raise NotImplementedError()

    def __hash__(self):
        raise NotImplementedError()
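A hypothetical concrete subclass, sketched only to illustrate the interface above (it is not part of the project); it wraps the flt64 IR type defined earlier:

class VolpeFloat(VolpeType):
    # Hypothetical 64-bit float wrapper implementing the VolpeType interface
    def __repr__(self):
        return "flt64"

    def unwrap(self) -> ir.Type:
        return flt64

    def __hash__(self):
        return hash(repr(self))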

Example #11
File: LLVMGen.py Project: mingtaoy/RIAL
 def gen_double(self, number: float):
     return ir.Constant(ir.DoubleType(), number)
Example #12
File: types.py Project: weakit/Mathics
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

from llvmlite import ir

int_type = ir.IntType(64)
real_type = ir.DoubleType()
bool_type = ir.IntType(1)
void_type = ir.VoidType()
Example #13
    else:
        return to_llvm_type.map[data_type.numpy_dtype]


if ir:
    to_llvm_type.map = {
        np.dtype(np.int8): ir.IntType(8),
        np.dtype(np.int16): ir.IntType(16),
        np.dtype(np.int32): ir.IntType(32),
        np.dtype(np.int64): ir.IntType(64),
        np.dtype(np.uint8): ir.IntType(8),
        np.dtype(np.uint16): ir.IntType(16),
        np.dtype(np.uint32): ir.IntType(32),
        np.dtype(np.uint64): ir.IntType(64),
        np.dtype(np.float32): ir.FloatType(),
        np.dtype(np.float64): ir.DoubleType(),
    }


def peel_off_type(dtype, type_to_peel_off):
    while type(dtype) is type_to_peel_off:
        dtype = dtype.base_type
    return dtype


def collate_types(types):
    """
    Takes a sequence of types and returns their "common type" e.g. (float, double, float) -> double
    Uses the collation rules from numpy.
    """
Example #14
 def gen_tipo(self, node):
     if (node == "inteiro" or node == "num_inteiro"):
         return ir.IntType(32)
     elif (node == "flutuante" or node == "num_flutuante"):
         return ir.DoubleType()
Example #15
    def body(self, node, builder, return_value):  # body actions
        for children in node.children:
            if children.type == 'variable_declaration':
                var_type = self.get_type(children)
                var_node_list = self.get_variable_list(children)  # several nodes
                if var_type == 'inteiro':
                    for var in var_node_list:
                        alloca = builder.alloca(ir.IntType(32), name=var.type)
                        alloca.align = 4
                        self.scope_var_list.append(alloca)
                elif var_type == 'flutuante':
                    for var in var_node_list:
                        alloca = builder.alloca(ir.DoubleType(), name=var.type)
                        alloca.align = 4
                        self.scope_var_list.append(alloca)
            elif children.type == 'attribution':
                assigned_var_key = self.get_assigned_var(children)
                alloca = self.get_alloca_object(assigned_var_key)
                expression = self.expression(children.children[1], builder)
                if not str(expression.type) in str(alloca.type):
                    if 'i32' in str(alloca.type):
                        expression = builder.fptosi(expression, ir.IntType(32))
                    else:
                        expression = builder.sitofp(expression, ir.DoubleType())
                if len(children.children[0].children) == 1:  # one-dimensional array
                    index = self.expression(children.children[0].children[0], builder)
                    zero = ir.Constant(ir.IntType(32), 0)
                    gep = builder.gep(alloca, [zero, zero], inbounds=True)
                    assigned_vector = builder.gep(gep, [index], inbounds=True)
                    builder.store(expression, assigned_vector)

                elif len(children.children[0].children) == 0:  # regular variable
                    builder.store(expression, alloca)
            elif children.type == 'read':
                alloca = self.get_alloca_object(children.children[0].type)
                alloca_type = None
                if 'i32' in str(alloca.type):
                    alloca_type = '%d\0'
                else:
                    alloca_type = '%lf\0'
                read = self.external_declaration('scanf')
                args = [self.string_declaration(alloca_type), alloca]
                builder.call(read, args)
            elif children.type == 'write':
                loaded_var = self.expression(children.children[0], builder)
                write = self.external_declaration('printf')
                loaded_var_type = None
                args = None
                if 'i32' in str(loaded_var.type):
                    loaded_var_type = '%d\n\0'
                    args = [self.string_declaration(loaded_var_type), loaded_var]
                else:
                    loaded_var_type = '%lf\n\0'
                    args = [self.string_declaration(loaded_var_type), loaded_var]
                builder.call(write, args)
            elif children.type == 'return':
                function = self.get_current_function()
                entry_return = function.append_basic_block('entry_return')
                builder.branch(entry_return)
                with builder.goto_block(entry_return):
                    expression = self.expression(children.children[0], builder)
                    builder.store(expression, return_value)
                    builder.branch(self.exit_block)
                exit_return = function.append_basic_block('exit_return')
                builder.position_at_end(exit_return)
            elif children.type == 'if':
                self.global_clock += 1
                if_expression = self.expression(children.children[0].children[0], builder)  # fix this part
                if len(children.children) == 2:
                    with builder.if_then(if_expression):
                        self.body(children.children[1].children[0], builder, return_value)
                else:
                    with builder.if_else(if_expression) as (then, otherwise):
                        with then:
                            self.body(children.children[1].children[0], builder, return_value)
                        with otherwise:
                            self.body(children.children[2].children[0], builder, return_value)
            elif children.type == 'function_call':
                function = self.get_function_object(children.children[0].type)
                arg_list = self.get_argument_list(children.children[0], builder)  # returns a list of prepared arguments
                modified_arguments = self.change_argument_type(function, arg_list, builder)
                builder.call(function, modified_arguments, 'call')
            elif children.type == 'repeat':
                self.global_clock += 1
                function = self.get_current_function()
                repeat_entry_block = function.append_basic_block('entry_repeat')
                repeat_exit_block = function.append_basic_block('exit_repeat')
                builder.branch(repeat_entry_block)
                builder.position_at_end(repeat_entry_block)
                self.body(children.children[0].children[0], builder, return_value)
                repeat_expression = self.expression(children.children[1].children[0], builder)
                builder.cbranch(repeat_expression, repeat_exit_block, repeat_entry_block)
                builder.position_at_end(repeat_exit_block)
Example #16
 def _alloca(self, name):
     """Create an alloca in the entry BB of the current function."""
     with self.builder.goto_entry_block():
         alloca = self.builder.alloca(ir.DoubleType(), size=None, name=name)
     return alloca
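For reference, a standalone sketch of the same entry-block alloca pattern (all names illustrative): placing allocas in the entry block is what lets LLVM's mem2reg pass later promote them to SSA registers.

from llvmlite import ir

dbl = ir.DoubleType()
fn = ir.Function(ir.Module(name="alloca_sketch"),
                 ir.FunctionType(dbl, (dbl,)), name="copy_through_slot")
b = ir.IRBuilder(fn.append_basic_block("entry"))
slot = b.alloca(dbl, name="x")     # entry-block alloca, as in _alloca above
b.store(fn.args[0], slot)
b.ret(b.load(slot, name="xval"))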
Example #17

def _repr(obj):
    """
    Get the representation of an object, with dedicated pprint-like format for lists.
    """
    if isinstance(obj, list):
        return '[' + (',\n '.join(
            (_repr(e).replace('\n', '\n ') for e in obj))) + '\n]'
    else:
        return repr(obj)


int_ty = ir.IntType(32)
i64_ty = ir.IntType(64)
float_ty = ir.DoubleType()
char_ty = ir.IntType(8)
bool_ty = ir.IntType(1)
void_ty = ir.VoidType()

intptr_ty = int_ty.as_pointer()
floatptr_ty = float_ty.as_pointer()
charptr_ty = char_ty.as_pointer()
voidptr_ty = char_ty.as_pointer()

llvm_type = {
    'int': int_ty,
    'int_*': intptr_ty,
    'float': float_ty,
    'float_*': floatptr_ty,
    'char': char_ty,
Example #18
def irdouble(pyval):
    """Converts a python value into an IR double constant value"""
    return ir.Constant(ir.DoubleType(), pyval)
Example #19
class LLVMBuilderContext:
    __global_context = None
    __uniq_counter = 0
    _llvm_generation = 0
    int32_ty = ir.IntType(32)
    float_ty = ir.DoubleType()

    def __init__(self):
        self._modules = []
        self._cache = weakref.WeakKeyDictionary()
        self._learningcache = weakref.WeakKeyDictionary()

    def __enter__(self):
        module = ir.Module(name="PsyNeuLinkModule-" +
                           str(LLVMBuilderContext._llvm_generation))
        self._modules.append(module)
        LLVMBuilderContext._llvm_generation += 1
        return self

    def __exit__(self, e_type, e_value, e_traceback):
        assert len(self._modules) > 0
        module = self._modules.pop()
        _modules.add(module)
        _all_modules.add(module)

    @property
    def module(self):
        assert len(self._modules) > 0
        return self._modules[-1]

    @classmethod
    def get_global(cls):
        if cls.__global_context is None:
            cls.__global_context = LLVMBuilderContext()
        return cls.__global_context

    @classmethod
    def get_unique_name(cls, name: str):
        cls.__uniq_counter += 1
        name = re.sub(r"[- ()\[\]]", "_", name)
        return name + '_' + str(cls.__uniq_counter)

    def get_builtin(self, name: str, args=[], function_type=None):
        if name in _builtin_intrinsics:
            return self.import_llvm_function(_BUILTIN_PREFIX + name)
        if name in ('maxnum',):
            function_type = pnlvm.ir.FunctionType(args[0], [args[0], args[0]])
        return self.module.declare_intrinsic("llvm." + name, args,
                                             function_type)

    def create_llvm_function(self,
                             args,
                             component,
                             name=None,
                             return_type=ir.VoidType()):
        name = str(component) if name is None else name

        # Builtins are already unique and need to keep their special name
        func_name = name if name.startswith(
            _BUILTIN_PREFIX) else self.get_unique_name(name)
        func_ty = pnlvm.ir.FunctionType(return_type, args)
        llvm_func = pnlvm.ir.Function(self.module, func_ty, name=func_name)
        llvm_func.attributes.add('argmemonly')
        for a in llvm_func.args:
            if isinstance(a.type, ir.PointerType):
                a.attributes.add('nonnull')

        metadata = self.get_debug_location(llvm_func, component)
        if metadata is not None:
            scope = dict(metadata.operands)["scope"]
            llvm_func.set_metadata("dbg", scope)

        # Create entry block
        block = llvm_func.append_basic_block(name="entry")
        builder = pnlvm.ir.IRBuilder(block)
        builder.debug_metadata = metadata

        return builder

    def gen_llvm_function(self, obj) -> ir.Function:
        cache = self._cache
        try:
            # HACK: allows for learning bin func and non-learning to differ
            if obj.learning_enabled is True:
                cache = self._learningcache
        except AttributeError as e:
            pass

        if obj not in cache:
            cache[obj] = obj._gen_llvm_function()
        return cache[obj]

    def import_llvm_function(self, name) -> ir.Function:
        """
        Get function handle if function exists in the current module.
        Create a function declaration if it exists in an older module.
        """
        try:
            f = self.gen_llvm_function(name)
        except AttributeError:
            f = _find_llvm_function(name, _all_modules | {self.module})
        # Add declaration to the current module
        if f.name not in self.module.globals:
            decl_f = ir.Function(self.module, f.type.pointee, f.name)
            assert decl_f.is_declaration
            return decl_f
        return f

    @staticmethod
    def get_debug_location(func: ir.Function, component):
        if "debug_info" not in debug_env:
            return

        mod = func.module
        path = inspect.getfile(
            component.__class__) if component is not None else "<pnl_builtin>"
        d_version = mod.add_metadata(
            [ir.IntType(32)(2), "Dwarf Version",
             ir.IntType(32)(4)])
        di_version = mod.add_metadata(
            [ir.IntType(32)(2), "Debug Info Version",
             ir.IntType(32)(3)])
        flags = mod.add_named_metadata("llvm.module.flags")
        if len(flags.operands) == 0:
            flags.add(d_version)
            flags.add(di_version)
        cu = mod.add_named_metadata("llvm.dbg.cu")
        di_file = mod.add_debug_info(
            "DIFile", {
                "filename": os.path.basename(path),
                "directory": os.path.dirname(path),
            })
        di_func_type = mod.add_debug_info(
            "DISubroutineType",
            {
                # None as `null`
                "types": mod.add_metadata([None]),
            })
        di_compileunit = mod.add_debug_info(
            "DICompileUnit", {
                "language": ir.DIToken("DW_LANG_Python"),
                "file": di_file,
                "producer": "PsyNeuLink",
                "runtimeVersion": 0,
                "isOptimized": False,
            },
            is_distinct=True)
        cu.add(di_compileunit)
        di_func = mod.add_debug_info("DISubprogram", {
            "name": func.name,
            "file": di_file,
            "line": 0,
            "type": di_func_type,
            "isLocal": False,
            "unit": di_compileunit,
        },
                                     is_distinct=True)
        di_loc = mod.add_debug_info("DILocation", {
            "line": 0,
            "column": 0,
            "scope": di_func,
        })
        return di_loc

    def get_input_struct_type(self, component):
        if hasattr(component, '_get_input_struct_type'):
            return component._get_input_struct_type(self)

        default_var = component.defaults.variable
        return self.convert_python_struct_to_llvm_ir(default_var)

    def get_output_struct_type(self, component):
        if hasattr(component, '_get_output_struct_type'):
            return component._get_output_struct_type(self)

        default_val = component.defaults.value
        return self.convert_python_struct_to_llvm_ir(default_val)

    def get_param_struct_type(self, component):
        if hasattr(component, '_get_param_struct_type'):
            return component._get_param_struct_type(self)

        params = component._get_param_values()
        return self.convert_python_struct_to_llvm_ir(params)

    def get_state_struct_type(self, component):
        if hasattr(component, '_get_state_struct_type'):
            return component._get_state_struct_type(self)

        stateful = component._get_state_values()
        return self.convert_python_struct_to_llvm_ir(stateful)

    def get_data_struct_type(self, component):
        if hasattr(component, '_get_data_struct_type'):
            return component._get_data_struct_type(self)

        return ir.LiteralStructType([])

    def get_param_ptr(self, component, builder, params_ptr, param_name):
        idx = self.int32_ty(component._get_param_ids().index(param_name))
        return builder.gep(params_ptr, [self.int32_ty(0), idx],
                           name="ptr_param_{}_{}".format(
                               param_name, component.name))

    def get_state_ptr(self, component, builder, state_ptr, port_Name):
        idx = self.int32_ty(component._get_state_ids().index(port_Name))
        return builder.gep(state_ptr, [self.int32_ty(0), idx],
                           name="ptr_state_{}_{}".format(
                               port_Name, component.name))

    def unwrap_2d_array(self, builder, element):
        if isinstance(element.type.pointee, ir.ArrayType) and isinstance(
                element.type.pointee.element, ir.ArrayType):
            assert element.type.pointee.count == 1
            return builder.gep(element, [self.int32_ty(0), self.int32_ty(0)])
        return element

    def inject_printf(self, builder, fmt, *args, override_debug=False):
        if "print_values" not in debug_env and not override_debug:
            return
        fmt += "\0"

        int8 = ir.IntType(8)
        stack_save = self.get_builtin("stacksave", [],
                                      ir.FunctionType(int8.as_pointer(), []))
        stack_restore = self.get_builtin(
            "stackrestore", [],
            ir.FunctionType(ir.VoidType(), [int8.as_pointer()]))

        old_stack = builder.call(stack_save, [])
        fmt_data = bytearray(fmt.encode("utf8"))

        # Allocate array to ease initialization
        fmt = builder.alloca(ir.ArrayType(int8, len(fmt_data)))
        builder.store(fmt.type.pointee(fmt_data), fmt)
        fmt_ptr = builder.gep(fmt, [self.int32_ty(0), self.int32_ty(0)])

        printf = self.get_builtin("printf")
        builder.call(printf, [fmt_ptr] + list(args))

        builder.call(stack_restore, [old_stack])

    def inject_printf_float_array(self,
                                  builder,
                                  array,
                                  prefix="",
                                  suffix="\n",
                                  override_debug=False):
        self.inject_printf(builder, prefix, override_debug=override_debug)

        with pnlvm.helpers.array_ptr_loop(builder, array,
                                          "print_array_loop") as (b1, i):
            self.inject_printf(b1,
                               "%f ",
                               b1.load(b1.gep(array, [self.int32_ty(0), i])),
                               override_debug=override_debug)

        self.inject_printf(builder, suffix, override_debug=override_debug)

    @contextmanager
    def _gen_composition_exec_context(self,
                                      composition,
                                      simulation=False,
                                      suffix=""):
        cond_gen = ConditionGenerator(self, composition)

        name = 'exec_sim_wrap_' if simulation else 'exec_wrap_'
        name += composition.name + suffix
        args = [
            self.get_state_struct_type(composition).as_pointer(),
            self.get_param_struct_type(composition).as_pointer(),
            self.get_input_struct_type(composition).as_pointer(),
            self.get_data_struct_type(composition).as_pointer(),
            cond_gen.get_condition_struct_type().as_pointer()
        ]
        builder = self.create_llvm_function(args, composition, name)
        llvm_func = builder.function

        for a in llvm_func.args:
            a.attributes.add('noalias')

        state, params, comp_in, data_arg, cond = llvm_func.args
        if "const_params" in debug_env:
            const_params = params.type.pointee(
                composition._get_param_initializer(None))
            params = builder.alloca(const_params.type, name="const_params_loc")
            builder.store(const_params, params)

        if "alloca_data" in debug_env:
            data = builder.alloca(data_arg.type.pointee)
            data_vals = builder.load(data_arg)
            builder.store(data_vals, data)
        else:
            data = data_arg

        yield builder, data, params, cond_gen

        if "alloca_data" in debug_env:
            data_vals = builder.load(data)
            builder.store(data_vals, data_arg)

        # Bump run counter
        cond_gen.bump_ts(builder, cond, (1, 0, 0))

        builder.ret_void()

    def gen_autodiffcomp_learning_exec(self, composition, simulation=False):
        composition._build_pytorch_representation(
            composition.default_execution_id)
        pytorch_model = composition.parameters.pytorch_representation.get(
            composition.default_execution_id)
        with self._gen_composition_exec_context(composition, simulation,
                                                "_learning") as (builder, data,
                                                                 params,
                                                                 cond_gen):
            state, _, comp_in, _, cond = builder.function.args

            pytorch_model._gen_llvm_training_function_body(
                self, builder, state, params, comp_in, data)
            # Call output CIM
            output_cim_w = composition._get_node_wrapper(
                composition.output_CIM)
            output_cim_f = self.import_llvm_function(output_cim_w)
            builder.block.name = "invoke_" + output_cim_f.name
            builder.call(output_cim_f, [state, params, comp_in, data, data])

            return builder.function

    def gen_autodiffcomp_exec(self, composition, simulation=False):
        """Creates llvm bin execute for autodiffcomp"""
        assert composition.controller is None
        composition._build_pytorch_representation(
            composition.default_execution_id)
        pytorch_model = composition.parameters.pytorch_representation.get(
            composition.default_execution_id)
        with self._gen_composition_exec_context(
                composition, simulation) as (builder, data, params, cond_gen):
            state, _, comp_in, _, cond = builder.function.args
            # Call input CIM
            input_cim_w = composition._get_node_wrapper(composition.input_CIM)
            input_cim_f = self.import_llvm_function(input_cim_w)

            builder.call(input_cim_f, [state, params, comp_in, data, data])

            # Call pytorch internal compiled llvm func
            input_cim_idx = composition._get_node_index(composition.input_CIM)

            model_params = builder.gep(
                params, [self.int32_ty(0), self.int32_ty(2)])

            # Extract the input that should be inserted into the model
            model_input = builder.gep(data, [
                self.int32_ty(0),
                self.int32_ty(0),
                self.int32_ty(input_cim_idx)
            ])
            model_output = builder.gep(data, [self.int32_ty(0)])

            pytorch_forward_func = self.import_llvm_function(pytorch_model)
            builder.call(pytorch_forward_func,
                         [state, model_params, model_input, model_output])

            # Call output CIM
            output_cim_w = composition._get_node_wrapper(
                composition.output_CIM)
            output_cim_f = self.import_llvm_function(output_cim_w)
            builder.block.name = "invoke_" + output_cim_f.name
            builder.call(output_cim_f, [state, params, comp_in, data, data])

            return builder.function

    def gen_composition_exec(self, composition, simulation=False):
        with self._gen_composition_exec_context(
                composition, simulation) as (builder, data, params, cond_gen):
            state, _, comp_in, _, cond = builder.function.args
            # Call input CIM
            input_cim_w = composition._get_node_wrapper(composition.input_CIM)
            input_cim_f = self.import_llvm_function(input_cim_w)
            builder.call(input_cim_f, [state, params, comp_in, data, data])

            # Call parameter CIM
            param_cim_w = composition._get_node_wrapper(
                composition.parameter_CIM)
            param_cim_f = self.import_llvm_function(param_cim_w)
            builder.call(param_cim_f, [state, params, comp_in, data, data])

            if simulation is False and composition.enable_controller and \
               composition.controller_mode == BEFORE:
                assert composition.controller is not None
                controller = composition._get_node_wrapper(
                    composition.controller)
                controller_f = self.import_llvm_function(controller)
                builder.call(controller_f,
                             [state, params, comp_in, data, data])

            # Allocate run set structure
            run_set_type = ir.ArrayType(ir.IntType(1), len(composition.nodes))
            run_set_ptr = builder.alloca(run_set_type, name="run_set")
            builder.store(run_set_type(None), run_set_ptr)

            # Allocate temporary output storage
            output_storage = builder.alloca(data.type.pointee,
                                            name="output_storage")

            iter_ptr = builder.alloca(self.int32_ty, name="iter_counter")
            builder.store(self.int32_ty(0), iter_ptr)

            loop_condition = builder.append_basic_block(
                name="scheduling_loop_condition")
            builder.branch(loop_condition)

            # Generate a while not 'end condition' loop
            builder.position_at_end(loop_condition)
            run_cond = cond_gen.generate_sched_condition(
                builder, composition.termination_processing[TimeScale.TRIAL],
                cond, None)
            run_cond = builder.not_(run_cond, name="not_run_cond")

            loop_body = builder.append_basic_block(name="scheduling_loop_body")
            exit_block = builder.append_basic_block(name="exit")
            builder.cbranch(run_cond, loop_body, exit_block)

            # Generate loop body
            builder.position_at_end(loop_body)

            zero = self.int32_ty(0)
            any_cond = ir.IntType(1)(0)

            # Calculate execution set before running the mechanisms
            for idx, mech in enumerate(composition.nodes):
                run_set_mech_ptr = builder.gep(
                    run_set_ptr, [zero, self.int32_ty(idx)],
                    name="run_cond_ptr_" + mech.name)
                mech_cond = cond_gen.generate_sched_condition(
                    builder, composition._get_processing_condition_set(mech),
                    cond, mech)
                ran = cond_gen.generate_ran_this_pass(builder, cond, mech)
                mech_cond = builder.and_(mech_cond,
                                         builder.not_(ran),
                                         name="run_cond_" + mech.name)
                any_cond = builder.or_(any_cond,
                                       mech_cond,
                                       name="any_ran_cond")
                builder.store(mech_cond, run_set_mech_ptr)

            for idx, mech in enumerate(composition.nodes):
                run_set_mech_ptr = builder.gep(run_set_ptr,
                                               [zero, self.int32_ty(idx)])
                mech_cond = builder.load(run_set_mech_ptr,
                                         name="mech_" + mech.name +
                                         "_should_run")
                with builder.if_then(mech_cond):
                    mech_w = composition._get_node_wrapper(mech)
                    mech_f = self.import_llvm_function(mech_w)
                    builder.block.name = "invoke_" + mech_f.name
                    # Wrappers do proper indexing of all structures
                    if len(mech_f.args) == 5:  # Mechanism wrappers have 5 inputs
                        builder.call(
                            mech_f,
                            [state, params, comp_in, data, output_storage])
                    else:
                        builder.call(mech_f, [
                            state, params, comp_in, data, output_storage, cond
                        ])

                    cond_gen.generate_update_after_run(builder, cond, mech)
                builder.block.name = "post_invoke_" + mech_f.name

            # Writeback results
            for idx, mech in enumerate(composition.nodes):
                run_set_mech_ptr = builder.gep(run_set_ptr,
                                               [zero, self.int32_ty(idx)])
                mech_cond = builder.load(run_set_mech_ptr,
                                         name="mech_" + mech.name + "_ran")
                with builder.if_then(mech_cond):
                    out_ptr = builder.gep(
                        output_storage,
                        [zero, zero, self.int32_ty(idx)],
                        name="result_ptr_" + mech.name)
                    data_ptr = builder.gep(
                        data, [zero, zero, self.int32_ty(idx)],
                        name="data_result_" + mech.name)
                    builder.store(builder.load(out_ptr), data_ptr)

            # Update step counter
            with builder.if_then(any_cond):
                builder.block.name = "inc_step"
                cond_gen.bump_ts(builder, cond)

            builder.block.name = "update_iter_count"
            # Increment number of iterations
            iters = builder.load(iter_ptr, name="iterw")
            iters = builder.add(iters, self.int32_ty(1), name="iterw_inc")
            builder.store(iters, iter_ptr)

            max_iters = len(composition.scheduler.consideration_queue)
            completed_pass = builder.icmp_unsigned("==",
                                                   iters,
                                                   self.int32_ty(max_iters),
                                                   name="completed_pass")
            # Increment pass and reset time step
            with builder.if_then(completed_pass):
                builder.block.name = "inc_pass"
                builder.store(zero, iter_ptr)
                # Bumping automatically zeros lower elements
                cond_gen.bump_ts(builder, cond, (0, 1, 0))

            builder.branch(loop_condition)

            builder.position_at_end(exit_block)

            if simulation is False and composition.enable_controller and \
               composition.controller_mode == AFTER:
                assert composition.controller is not None
                controller = composition._get_node_wrapper(
                    composition.controller)
                controller_f = self.import_llvm_function(controller)
                builder.call(controller_f,
                             [state, params, comp_in, data, data])

            # Call output CIM
            output_cim_w = composition._get_node_wrapper(
                composition.output_CIM)
            output_cim_f = self.import_llvm_function(output_cim_w)
            builder.block.name = "invoke_" + output_cim_f.name
            builder.call(output_cim_f, [state, params, comp_in, data, data])

        return builder.function

    def gen_composition_run(self,
                            composition,
                            simulation=False,
                            learning=False):
        name = 'run_sim_wrap_' if simulation else 'run_wrap_'
        name += composition.name
        args = [
            self.get_state_struct_type(composition).as_pointer(),
            self.get_param_struct_type(composition).as_pointer(),
            self.get_data_struct_type(composition).as_pointer(),
            self.get_input_struct_type(composition).as_pointer(),
            self.get_output_struct_type(composition).as_pointer(),
            self.int32_ty.as_pointer(),
            self.int32_ty.as_pointer()
        ]
        builder = self.create_llvm_function(args, composition, name)
        llvm_func = builder.function
        for a in llvm_func.args:
            a.attributes.add('noalias')

        state, params, data, data_in, data_out, runs_ptr, inputs_ptr = llvm_func.args
        # simulation does not care about the output
        # it extracts results of the controller objective mechanism
        if simulation:
            data_out.attributes.remove('nonnull')

        if not simulation and "const_data" in debug_env:
            const_data = data.type.pointee(
                composition._get_data_initializer(None))
            data = builder.alloca(data.type.pointee)
            builder.store(const_data, data)

        # Hardcode stateful parameters if set in the environment
        if not simulation and "const_state" in debug_env:
            const_state = state.type.pointee(
                composition._get_state_initializer(None))
            state = builder.alloca(const_state.type, name="const_state_loc")
            builder.store(const_state, state)

        if not simulation and "const_input" in debug_env:
            if not debug_env["const_input"]:
                input_init = pnlvm._tupleize(
                    [[os.defaults.variable]
                     for os in composition.input_CIM.input_ports])
                print("Setting default input: ", input_init)
            else:
                input_init = ast.literal_eval(debug_env["const_input"])
                print("Setting user input: ", input_init)

            builder.store(data_in.type.pointee(input_init), data_in)
            builder.store(inputs_ptr.type.pointee(1), inputs_ptr)

        # Allocate and initialize condition structure
        cond_gen = ConditionGenerator(self, composition)
        cond_type = cond_gen.get_condition_struct_type()
        cond = builder.alloca(cond_type)
        cond_init = cond_type(cond_gen.get_condition_initializer())
        builder.store(cond_init, cond)

        if learning:
            # Call training function
            data_in_ptr = builder.gep(data_in, [self.int32_ty(0)])
            exec_learning_f = self.import_llvm_function(composition)
            builder.call(exec_learning_f,
                         [state, params, data_in_ptr, data, cond])

        runs = builder.load(runs_ptr, "runs")
        with pnlvm.helpers.for_loop_zero_inc(builder, runs,
                                             "run_loop") as (b, iters):
            # Get the right input stimulus
            input_idx = b.urem(iters, b.load(inputs_ptr))
            data_in_ptr = b.gep(data_in, [input_idx])

            # Call execution
            if learning:
                composition.learning_enabled = False
            if simulation:
                exec_f = self.import_llvm_function(
                    composition._llvm_simulation.name)
            else:
                exec_f = self.import_llvm_function(composition)
            if learning:
                composition.learning_enabled = True
            b.call(exec_f, [state, params, data_in_ptr, data, cond])

            if not simulation:
                # Extract output_CIM result
                idx = composition._get_node_index(composition.output_CIM)
                result_ptr = b.gep(
                    data,
                    [self.int32_ty(0),
                     self.int32_ty(0),
                     self.int32_ty(idx)])
                output_ptr = b.gep(data_out, [iters])
                result = b.load(result_ptr)
                b.store(result, output_ptr)

        builder.ret_void()
        return llvm_func

    def gen_multirun_wrapper(self, function: ir.Function) -> ir.Function:
        if function.module is not self.module:
            function = ir.Function(self.module, function.type.pointee,
                                   function.name)
            assert function.is_declaration

        args = [a.type for a in function.args]
        args.append(self.int32_ty.as_pointer())
        multirun_ty = ir.FunctionType(function.type.pointee.return_type, args)
        multirun_f = ir.Function(self.module, multirun_ty,
                                 function.name + "_multirun")
        block = multirun_f.append_basic_block(name="entry")
        builder = ir.IRBuilder(block)

        multi_runs = builder.load(multirun_f.args[-1])
        # Runs need special handling. data_in and data_out are one dimensional,
        # but hold entries for all parallel invocations.
        is_comp_run = len(function.args) == 7
        if is_comp_run:
            runs_count = builder.load(multirun_f.args[5])
            input_count = builder.load(multirun_f.args[6])

        with pnlvm.helpers.for_loop_zero_inc(builder, multi_runs,
                                             "multi_run_loop") as (b, index):
            # Index all pointer arguments
            indexed_args = []
            for i, arg in enumerate(multirun_f.args[:-1]):
                # Don't adjust #inputs and #trials
                if isinstance(arg.type, ir.PointerType):
                    offset = index
                    # #runs and #trials need to be the same for every invocation
                    if is_comp_run and i >= 5:
                        offset = self.int32_ty(0)
                    # data arrays need special handling
                    elif is_comp_run and i == 4:  # data_out
                        offset = b.mul(index, runs_count)
                    elif is_comp_run and i == 3:  # data_in
                        offset = b.mul(index, input_count)

                    arg = b.gep(arg, [offset])

                indexed_args.append(arg)

            b.call(function, indexed_args)

        builder.ret_void()
        return multirun_f

    def convert_python_struct_to_llvm_ir(self, t):
        if type(t) is list:
            assert all(type(x) is type(t[0]) for x in t)
            elem_t = self.convert_python_struct_to_llvm_ir(t[0])
            return ir.ArrayType(elem_t, len(t))
        elif type(t) is tuple:
            elems_t = (self.convert_python_struct_to_llvm_ir(x) for x in t)
            return ir.LiteralStructType(elems_t)
        elif isinstance(t, (int, float)):
            return self.float_ty
        elif isinstance(t, np.ndarray):
            return self.convert_python_struct_to_llvm_ir(t.tolist())
        elif t is None:
            return ir.LiteralStructType([])
        elif isinstance(t, np.random.RandomState):
            return pnlvm.builtins.get_mersenne_twister_state_struct(self)
        elif torch_available and isinstance(t, torch.Tensor):
            return self.convert_python_struct_to_llvm_ir(t.numpy())
        assert False, "Don't know how to convert {}".format(type(t))
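To make the recursion in convert_python_struct_to_llvm_ir easier to follow, here is a stripped-down, hedged mirror of its list/tuple/scalar rules only (the numpy, RandomState and torch cases are omitted; float_ty is assumed to be ir.DoubleType(), as in the class above):

from llvmlite import ir

float_ty = ir.DoubleType()

def convert(t):
    # Lists become fixed-length arrays of their (homogeneous) element type
    if isinstance(t, list):
        return ir.ArrayType(convert(t[0]), len(t))
    # Tuples become literal structs, one field per element
    if isinstance(t, tuple):
        return ir.LiteralStructType(convert(x) for x in t)
    # Python scalars map to the context's float type
    if isinstance(t, (int, float)):
        return float_ty
    raise TypeError("unsupported value: {!r}".format(t))

print(convert(([1.0, 2.0, 3.0], 0.5)))  # {[3 x double], double}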
Example #20
 def make_arg_externs(self, scope, jit):
     '''Create arg and argf functions in case of JIT, and also main when compiling'''
     if jit is None:
         self.type = 'char**'
         global_argv_constant = ir.Constant(self.ir_type, None)
         global_argv = ir.GlobalVariable(scope.module, self.ir_type,
                                         'global_argv')
         global_argv.initializer = global_argv_constant
         self.type = 'int'
         global_argc_constant = ir.Constant(self.ir_type, None)
         global_argc = ir.GlobalVariable(scope.module, self.ir_type,
                                         'global_argc')
         global_argc.initializer = global_argc_constant
         self.type = 'char**'
         main_func_type = ir.FunctionType(ir.IntType(32),
                                          [ir.IntType(32), self.ir_type])
         main_func = ir.Function(scope.module, main_func_type, name='main')
         argc, argv = main_func.args
         main_block = main_func.append_basic_block()
         main_builder = ir.IRBuilder(main_block)
         main_builder.store(argv, global_argv)
         main_builder.store(argc, global_argc)
         run_ret = main_builder.call(scope.get_ptr('run'), [])
         main_builder.ret(run_ret)
         self.type = 'char*'
         atoi_type = ir.FunctionType(ir.IntType(32), [self.ir_type])
         atoi = ir.Function(scope.module, atoi_type, name='atoi')
         atof_type = ir.FunctionType(ir.DoubleType(), [self.ir_type])
         atof = ir.Function(scope.module, atof_type, name='atof')
         for name, type, type_str in zip(['arg', 'argf'], [int, float],
                                         ['int', 'float']):
             try:
                 func = scope.get_ptr(name)
             except:
                 continue
             try:
                 if scope(name, [
                         Decl(lineno=-1,
                              noalias=False,
                              ref=False,
                              type='int')
                 ]) != type_str:
                     raise ScopeException
             except ScopeException:
                 Exit.INVALID_EXTERN_BUILTIN(name)
             arg, = func.args
             entry_block = func.append_basic_block()
             func_builder = ir.IRBuilder(entry_block)
             argv = func_builder.load(global_argv)
             argc = func_builder.load(global_argc)
             index = func_builder.add(arg, ir.Constant(ir.IntType(32), 1))
             argc_check = func_builder.icmp_signed('<', index, argc)
             with func_builder.if_then(argc_check):
                 argv_index = func_builder.gep(argv, [index])
                 argv_n_str = func_builder.load(argv_index)
                 if type_str in ['int']:
                     atoival = func_builder.call(atoi, [argv_n_str])
                     func_builder.ret(atoival)
                 elif type_str in ['float']:
                     atofval = func_builder.call(atof, [argv_n_str])
                     self.type = 'float'
                     fval = func_builder.fptrunc(atofval, self.ir_type)
                     func_builder.ret(fval)
             self.type = type_str
             func_builder.ret(self.ir_type(type(0)))
     else:
         for name, type, type_str in zip(['arg', 'argf'], [int, float],
                                         ['int', 'float']):
             try:
                 func = scope.get_ptr(name)
             except:
                 continue
             try:
                 if scope(name, [
                         Decl(lineno=-1,
                              noalias=False,
                              ref=False,
                              type='int')
                 ]) != type_str:
                     raise ScopeException
             except ScopeException:
                 Exit.INVALID_EXTERN_BUILTIN(name)
             arg, = func.args
             entry_block = func.append_basic_block()
             func_builder = ir.IRBuilder(entry_block)
             default_block = func_builder.append_basic_block()
             self.type = type_str
             with func_builder.goto_block(default_block):
                 func_builder.ret(self.ir_type(0))
             switch = func_builder.switch(arg, default_block)
             for i, val in enumerate(jit):
                 switch_block = func_builder.append_basic_block()
                 with func_builder.goto_block(switch_block):
                     try:
                         func_builder.ret(self.ir_type(type(val)))
                     except:
                         func_builder.ret(self.ir_type(type(0)))
                 switch.add_case(i, switch_block)
Example #21
 def visit_Call(self, instr):
     # Add to any call that has float/double return type
     if instr.type in (ir.FloatType(), ir.DoubleType()):
         for flag in self.flags:
             instr.fastmath.add(flag)
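Related note: llvmlite also lets fast-math flags be requested when the instruction is created, which is sometimes simpler than patching instructions afterwards as the visitor above does; a minimal sketch (names illustrative):

from llvmlite import ir

dbl = ir.DoubleType()
fn = ir.Function(ir.Module(name="fastmath_sketch"),
                 ir.FunctionType(dbl, (dbl, dbl)), name="mul_fast")
b = ir.IRBuilder(fn.append_basic_block("entry"))
x, y = fn.args
b.ret(b.fmul(x, y, name="prod", flags=('fast',)))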
Example #22
 def _codegen_NumberExprAST(self, node):
     return self.builder.constant(ir.DoubleType(), float(node.val))
Example #23
def declare_atomic_max_float64(lmod):
    fname = '___numba_atomic_double_max'
    fnty = ir.FunctionType(ir.DoubleType(),
                           (ir.PointerType(ir.DoubleType()), ir.DoubleType()))
    return cgutils.get_or_insert_function(lmod, fnty, fname)
Example #24
from typing import Any, Dict

import llvmlite.ir as ir

import kal_ast


class GenerateCodeError(Exception):
    pass


# NOTE Kaleidoscope uses double precision floating point for all values
ZERO = FALSE = ir.Constant(ir.DoubleType(), 0.0)
ONE = TRUE = ir.Constant(ir.DoubleType(), 1.0)

# NOTE fcmp_ordered means that neither operand can be a QNAN (quiet NaN)
# NOTE fcmp_unordered means that either operand may be a QNAN (quiet NaN)


class LLVMCodeGenerator:
    """ Node visitor class that generates LLVM IR code.

        Note: each `_emit_<Node>()` method should return an appropriate `ir.Value`.
    """
    def __init__(self):
        # Top-level container of all other LLVM IR objects
        self.module: ir.Module = ir.Module()

        # Current IR builder
        self.builder: ir.IRBuilder = None
Example #25
def cast_str_to_float64(context, builder, fromty, toty, val):
    fnty = lir.FunctionType(lir.DoubleType(), [lir.IntType(8).as_pointer()])
    fn = builder.module.get_or_insert_function(fnty, name="str_to_float64")
    return builder.call(fn, (val,))
Example #26
 def _emit_NumberExpr(self, node: kal_ast.NumberExpr) -> ir.Value:
     return ir.Constant(ir.DoubleType(), float(node.val))
Example #27
class LLVMBuilderContext:
    __global_context = None
    __uniq_counter = 0
    _llvm_generation = 0
    int32_ty = ir.IntType(32)
    float_ty = ir.DoubleType()
    bool_ty = ir.IntType(1)

    def __init__(self):
        self._modules = []
        self._cache = weakref.WeakKeyDictionary()
        self._stats = {
            "cache_misses": 0,
            "cache_requests": 0,
            "types_converted": 0,
            "param_structs_generated": 0,
            "state_structs_generated": 0,
            "data_structs_generated": 0,
            "input_structs_generated": 0,
            "output_structs_generated": 0,
        }

    def __enter__(self):
        module = ir.Module(name="PsyNeuLinkModule-" +
                           str(LLVMBuilderContext._llvm_generation))
        self._modules.append(module)
        LLVMBuilderContext._llvm_generation += 1
        return self

    def __exit__(self, e_type, e_value, e_traceback):
        assert len(self._modules) > 0
        module = self._modules.pop()
        _modules.add(module)
        _all_modules.add(module)

    @property
    def module(self):
        assert len(self._modules) > 0
        return self._modules[-1]

    @classmethod
    def get_global(cls):
        if cls.__global_context is None:
            cls.__global_context = LLVMBuilderContext()
        return cls.__global_context

    @classmethod
    def get_unique_name(cls, name: str):
        cls.__uniq_counter += 1
        name = re.sub(r"[^a-zA-Z0-9_]", "_", name)
        return name + '_' + str(cls.__uniq_counter)

    def get_builtin(self, name: str, args=[], function_type=None):
        if name in _builtin_intrinsics:
            return self.import_llvm_function(_BUILTIN_PREFIX + name)
        if name in ('maxnum',):
            function_type = pnlvm.ir.FunctionType(args[0], [args[0], args[0]])
        return self.module.declare_intrinsic("llvm." + name, args,
                                             function_type)

    def create_llvm_function(self,
                             args,
                             component,
                             name=None,
                             *,
                             return_type=ir.VoidType(),
                             tags: frozenset = frozenset()):
        name = "_".join((str(component), *tags)) if name is None else name

        # Builtins are already unique and need to keep their special name
        func_name = name if name.startswith(
            _BUILTIN_PREFIX) else self.get_unique_name(name)
        func_ty = pnlvm.ir.FunctionType(return_type, args)
        llvm_func = pnlvm.ir.Function(self.module, func_ty, name=func_name)
        llvm_func.attributes.add('argmemonly')
        for a in llvm_func.args:
            if isinstance(a.type, ir.PointerType):
                a.attributes.add('nonnull')

        metadata = self.get_debug_location(llvm_func, component)
        if metadata is not None:
            scope = dict(metadata.operands)["scope"]
            llvm_func.set_metadata("dbg", scope)

        # Create entry block
        block = llvm_func.append_basic_block(name="entry")
        builder = pnlvm.ir.IRBuilder(block)
        builder.debug_metadata = metadata

        return builder

    def gen_llvm_function(self, obj, *, tags: frozenset) -> ir.Function:
        obj_cache = self._cache.setdefault(obj, dict())

        self._stats["cache_requests"] += 1
        if tags not in obj_cache:
            self._stats["cache_misses"] += 1
            with self:
                obj_cache[tags] = obj._gen_llvm_function(ctx=self, tags=tags)
        return obj_cache[tags]

    def import_llvm_function(self, fun, *,
                             tags: frozenset = frozenset()) -> ir.Function:
        """
        Get function handle if function exists in the current module.
        Create a function declaration if it exists in an older module.
        """
        if isinstance(fun, str):
            f = _find_llvm_function(fun, _all_modules | {self.module})
        else:
            f = self.gen_llvm_function(fun, tags=tags)

        # Add declaration to the current module
        if f.name not in self.module.globals:
            decl_f = ir.Function(self.module, f.type.pointee, f.name)
            assert decl_f.is_declaration
            return decl_f
        return f

    @staticmethod
    def get_debug_location(func: ir.Function, component):
        if "debug_info" not in debug_env:
            return

        mod = func.module
        path = (inspect.getfile(component.__class__)
                if component is not None else "<pnl_builtin>")
        d_version = mod.add_metadata(
            [ir.IntType(32)(2), "Dwarf Version",
             ir.IntType(32)(4)])
        di_version = mod.add_metadata(
            [ir.IntType(32)(2), "Debug Info Version",
             ir.IntType(32)(3)])
        flags = mod.add_named_metadata("llvm.module.flags")
        if len(flags.operands) == 0:
            flags.add(d_version)
            flags.add(di_version)
        cu = mod.add_named_metadata("llvm.dbg.cu")
        di_file = mod.add_debug_info(
            "DIFile", {
                "filename": os.path.basename(path),
                "directory": os.path.dirname(path),
            })
        di_func_type = mod.add_debug_info(
            "DISubroutineType",
            {
                # None as `null`
                "types": mod.add_metadata([None]),
            })
        di_compileunit = mod.add_debug_info(
            "DICompileUnit", {
                "language": ir.DIToken("DW_LANG_Python"),
                "file": di_file,
                "producer": "PsyNeuLink",
                "runtimeVersion": 0,
                "isOptimized": False,
            },
            is_distinct=True)
        cu.add(di_compileunit)
        di_func = mod.add_debug_info(
            "DISubprogram", {
                "name": func.name,
                "file": di_file,
                "line": 0,
                "type": di_func_type,
                "isLocal": False,
                "unit": di_compileunit,
            },
            is_distinct=True)
        di_loc = mod.add_debug_info("DILocation", {
            "line": 0,
            "column": 0,
            "scope": di_func,
        })
        return di_loc

    @_comp_cached
    def get_input_struct_type(self, component):
        self._stats["input_structs_generated"] += 1
        if hasattr(component, '_get_input_struct_type'):
            return component._get_input_struct_type(self)

        default_var = component.defaults.variable
        return self.convert_python_struct_to_llvm_ir(default_var)

    @_comp_cached
    def get_output_struct_type(self, component):
        self._stats["output_structs_generated"] += 1
        if hasattr(component, '_get_output_struct_type'):
            return component._get_output_struct_type(self)

        default_val = component.defaults.value
        return self.convert_python_struct_to_llvm_ir(default_val)

    @_comp_cached
    def get_param_struct_type(self, component):
        self._stats["param_structs_generated"] += 1
        if hasattr(component, '_get_param_struct_type'):
            return component._get_param_struct_type(self)

        def _param_struct(p):
            val = p.get(None)  # this should use defaults
            if hasattr(val, "_get_compilation_params") or \
               hasattr(val, "_get_param_struct_type"):
                return self.get_param_struct_type(val)
            if isinstance(val, ContentAddressableList):
                return ir.LiteralStructType(
                    self.get_param_struct_type(x) for x in val)
            elif p.name == 'matrix':  # Flatten matrix
                val = np.asfarray(val).flatten()
            elif p.name == 'num_estimates':  # Should always be int
                val = np.int32(0) if val is None else np.int32(val)
            elif np.ndim(val) == 0 and component._is_param_modulated(p):
                val = [val]  # modulation adds array wrap
            return self.convert_python_struct_to_llvm_ir(val)

        elements = map(_param_struct, component._get_compilation_params())
        return ir.LiteralStructType(elements)

    @_comp_cached
    def get_state_struct_type(self, component):
        self._stats["state_structs_generated"] += 1
        if hasattr(component, '_get_state_struct_type'):
            return component._get_state_struct_type(self)

        def _state_struct(p):
            val = p.get(None)  # this should use defaults
            if hasattr(val, "_get_compilation_state") or \
               hasattr(val, "_get_state_struct_type"):
                return self.get_state_struct_type(val)
            if isinstance(val, ContentAddressableList):
                return ir.LiteralStructType(
                    self.get_state_struct_type(x) for x in val)
            struct = self.convert_python_struct_to_llvm_ir(val)
            return ir.ArrayType(struct, p.history_min_length + 1)

        elements = map(_state_struct, component._get_compilation_state())
        return ir.LiteralStructType(elements)

    @_comp_cached
    def get_data_struct_type(self, component):
        self._stats["data_structs_generated"] += 1
        if hasattr(component, '_get_data_struct_type'):
            return component._get_data_struct_type(self)

        return ir.LiteralStructType([])

    def get_node_wrapper(self, composition, node):
        cache = getattr(composition, '_node_wrappers', None)
        if cache is None:
            cache = dict()
            setattr(composition, '_node_wrappers', cache)
        return cache.setdefault(node, _node_wrapper(composition, node))

    def convert_python_struct_to_llvm_ir(self, t):
        self._stats["types_converted"] += 1
        if t is None:
            return ir.LiteralStructType([])
        elif type(t) is list:
            if len(t) == 0:
                return ir.LiteralStructType([])
            elems_t = [self.convert_python_struct_to_llvm_ir(x) for x in t]
            if all(x == elems_t[0] for x in elems_t):
                return ir.ArrayType(elems_t[0], len(elems_t))
            return ir.LiteralStructType(elems_t)
        elif type(t) is tuple:
            elems_t = [self.convert_python_struct_to_llvm_ir(x) for x in t]
            if len(elems_t) > 0 and all(x == elems_t[0] for x in elems_t):
                return ir.ArrayType(elems_t[0], len(elems_t))
            return ir.LiteralStructType(elems_t)
        elif isinstance(t, enum.Enum):
            # FIXME: Consider enums of non-int type
            assert all(round(x.value) == x.value for x in type(t))
            return self.int32_ty
        elif isinstance(t, (int, float, np.floating)):
            return self.float_ty
        elif isinstance(t, np.integer):
            # Python 'int' is handled above as it is the default type for '0'
            return ir.IntType(t.nbytes * 8)
        elif isinstance(t, np.ndarray):
            return self.convert_python_struct_to_llvm_ir(t.tolist())
        elif isinstance(t, np.random.RandomState):
            return pnlvm.builtins.get_mersenne_twister_state_struct(self)
        elif isinstance(t, Time):
            return ir.ArrayType(self.int32_ty, len(TimeScale))
        elif isinstance(t, SampleIterator):
            if isinstance(t.generator, list):
                return ir.ArrayType(self.float_ty, len(t.generator))
            # Generic iterator is {start, increment, count}
            return ir.LiteralStructType(
                (self.float_ty, self.float_ty, self.int32_ty))
        assert False, "Don't know how to convert {}".format(type(t))
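
As a rough standalone illustration of the conversion rules above (simplified: scalars map to double here rather than the configurable float_ty, and only lists, tuples, and numbers are handled), homogeneous sequences become LLVM arrays and heterogeneous ones become literal structs:

from llvmlite import ir

def py_to_ir(t):
    if isinstance(t, (list, tuple)):
        elems = [py_to_ir(x) for x in t]
        if elems and all(e == elems[0] for e in elems):
            return ir.ArrayType(elems[0], len(elems))
        return ir.LiteralStructType(elems)
    if isinstance(t, (int, float)):
        return ir.DoubleType()
    raise TypeError("Don't know how to convert {}".format(type(t)))

print(py_to_ir([[1.0, 2.0], [3.0, 4.0]]))  # [2 x [2 x double]]
print(py_to_ir((1.0, [2.0, 3.0])))         # {double, [2 x double]}
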
Example #28
0
 def expression(self, node, builder):
     if node.type == '!':
         return builder.neg(self.expression(node.children[0], builder), name='neg')
     elif node.type in ['-', '+', '*', '/']:
         left = self.expression(node.children[0], builder)
         right = self.expression(node.children[1], builder)
         if node.type == '+':
             if str(left.type) == 'i32' and str(right.type) == 'i32':
                 return builder.add(left, right, name='add', flags=())
             else:
                 left = builder.sitofp(left, ir.DoubleType())
                 right = builder.sitofp(right, ir.DoubleType())
                 return builder.fadd(left, right, name='add', flags=())
         elif node.type == '-':
             if str(left.type) == 'i32' and str(right.type) == 'i32':
                 return builder.sub(left, right, name='sub', flags=())
             else:
                 left = builder.sitofp(left, ir.DoubleType())
                 right = builder.sitofp(right, ir.DoubleType())
                 return builder.fsub(left, right, name='sub', flags=())
         elif node.type == '*':
             if str(left.type) == 'i32' and str(right.type) == 'i32':
                 return builder.mul(left, right, name='mul', flags=())
             else:
                 left = builder.sitofp(left, ir.DoubleType())
                 right = builder.sitofp(right, ir.DoubleType())
                 return builder.fmul(left, right, name='mul', flags=())
         elif node.type == '/':
             if str(left.type) == 'i32' and str(right.type) == 'i32':
                 return builder.sdiv(left, right, name='div', flags=())
             else:
                 left = builder.sitofp(left, ir.DoubleType())
                 right = builder.sitofp(right, ir.DoubleType())
                 return builder.fdiv(left, right, name='div', flags=())
     elif node.type in ['=', '<', '>', '>=', '<=']:
         cond = node.type
         if node.type == '=':
             cond = '=='
         return builder.icmp_signed(cond, self.expression(node.children[0], builder), self.expression(node.children[1], builder), name='cond')
     elif node.type == '||':
         return builder.or_(self.expression(node.children[0], builder), self.expression(node.children[1], builder), name='cond')
     elif node.type == '&&':
         return builder.and_(self.expression(node.children[0], builder), self.expression(node.children[1], builder), name='cond')
     elif node.type == 'function_call':
         function = self.get_function_object(node.children[0].type)
         arg_list = self.get_argument_list(node.children[0], builder)  # returns the prepared argument list
         modified_arguments = self.change_argument_type(function, arg_list, builder)
         call = builder.call(function, modified_arguments, 'ret')
         return call
     elif len(node.children) == 0:  # a leaf: wrap the literal in an ir.Constant or load the variable and return it
         var_type = self.get_string_type(node.type)
         value = None
         if var_type == 1:
             value = ir.Constant(ir.IntType(32), int(node.type))
         elif var_type == 2:
             value = ir.Constant(ir.DoubleType(), float(node.type))
         else:
             alloca = self.get_alloca_object(node.type)
             value = builder.load(alloca, name='', align=4)
         return value
     elif self.represents_id(node.type) is not None:  # the node is an array identifier
         alloca = self.get_alloca_object(node.type)
         index = self.expression(node.children[0], builder)  # array index
         zero = ir.Constant(ir.IntType(32), 0)
         gep = builder.gep(alloca, [zero, zero], inbounds=True)
         gep = builder.gep(gep, [index], inbounds=True)
         return builder.load(gep, '', 4)
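
A minimal standalone sketch of the promotion idiom used repeatedly above (the mixed_add function is hypothetical): when the operands are not both i32, the integer one is converted with sitofp and the floating-point instruction is emitted instead of the integer one.

from llvmlite import ir

module = ir.Module(name="promotion_demo")
i32, f64 = ir.IntType(32), ir.DoubleType()
fn = ir.Function(module, ir.FunctionType(f64, [i32, f64]), name="mixed_add")
builder = ir.IRBuilder(fn.append_basic_block(name="entry"))

left, right = fn.args
if str(left.type) == 'i32' and str(right.type) == 'i32':
    result = builder.add(left, right, name='add')          # pure integer path
else:
    if str(left.type) == 'i32':
        left = builder.sitofp(left, f64, name='lhs_fp')    # promote to double
    if str(right.type) == 'i32':
        right = builder.sitofp(right, f64, name='rhs_fp')
    result = builder.fadd(left, right, name='add')         # floating-point path
builder.ret(result)
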
Example #29
0
def dist_get_time(context, builder, sig, args):
    fnty = lir.FunctionType(lir.DoubleType(), [])
    fn = cgutils.get_or_insert_function(builder.module,
                                        fnty,
                                        name="hpat_get_time")
    return builder.call(fn, [])
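
A rough plain-llvmlite equivalent of the pattern above, without Numba's cgutils helper (the calling function is illustrative; hpat_get_time is assumed to be provided by the runtime library at link time): look the symbol up in module.globals and declare it only once.

from llvmlite import ir

module = ir.Module(name="time_demo")
f64 = ir.DoubleType()
get_time = module.globals.get("hpat_get_time")
if get_time is None:
    get_time = ir.Function(module, ir.FunctionType(f64, []), name="hpat_get_time")

elapsed = ir.Function(module, ir.FunctionType(f64, []), name="elapsed")
builder = ir.IRBuilder(elapsed.append_basic_block(name="entry"))
builder.ret(builder.call(get_time, []))
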
Example #30
0
class F64Type(BaseType):
    base_llvm_type = ll.DoubleType()
    size_in_bits = 64
    is_floating_point = True
    str_repr = "f64"
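
A hedged usage sketch, assuming ll is that project's alias for llvmlite.ir and that F64Type is importable as defined above: the descriptor's base_llvm_type plugs directly into ordinary llvmlite constructs.

import llvmlite.ir as ll

half = ll.Constant(F64Type.base_llvm_type, 0.5)
fnty = ll.FunctionType(F64Type.base_llvm_type, [F64Type.base_llvm_type] * 2)
print(F64Type.str_repr, F64Type.size_in_bits, half.type)  # f64 64 double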