Example #1
    def test_basic4(self):
        """
        Test that a dispatcher object can be used as input to another
        function with its signature given as part of a tuple type
        """
        a = 1

        @njit
        def foo1(x):
            return x + 1

        @njit
        def foo2(x):
            return x + 2

        tup = (foo1, foo2)
        int_int_fc = types.FunctionType(types.int64(types.int64, ))

        @njit(types.int64(types.UniTuple(int_int_fc, 2)))
        def bar(fcs):
            x = 0
            for i in range(2):
                x += fcs[i](a)
            return x

        self.assertEqual(bar(tup), foo1(a) + foo2(a))
Example #2
    def test_basic5(self):
        a = 1

        @njit
        def foo1(x):
            return x + 1

        @njit
        def foo2(x):
            return x + 2

        @njit
        def bar1(x):
            return x / 10

        @njit
        def bar2(x):
            return x / 1000

        tup = (foo1, foo2)
        tup_bar = (bar1, bar2)
        int_int_fc = types.FunctionType(types.int64(types.int64, ))

        flt_flt_fc = types.FunctionType(types.float64(types.float64, ))

        @njit((types.UniTuple(int_int_fc, 2), types.UniTuple(flt_flt_fc, 2)))
        def bar(fcs, ffs):
            x = 0
            for i in range(2):
                x += fcs[i](a)
            for fn in ffs:
                x += fn(a)
            return x

        got = bar(tup, tup_bar)
        expected = foo1(a) + foo2(a) + bar1(a) + bar2(a)
        self.assertEqual(got, expected)
Example #3
    def test_apply_function_in_function(self):
        def foo(f, f_inner):
            return f(f_inner)

        @cfunc('int64(float64)')
        def f_inner(i):
            return int64(i * 3)

        @cfunc(int64(types.FunctionType(f_inner._sig)))
        def f(f_inner):
            return f_inner(123.4)

        self.assertEqual(
            jit(nopython=True)(foo)(f, f_inner), foo(f._pyfunc,
                                                     f_inner._pyfunc))
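
Note: the example above builds the FunctionType from the cfunc's private _sig attribute. A minimal self-contained sketch of the same idea with the signature spelled out explicitly; the names triple, inner_fc and apply_fc are illustrative, not taken from the snippet above:

from numba import cfunc, njit, types

@cfunc(types.int64(types.float64))
def triple(x):
    # int() truncates toward zero, matching the declared int64 return type
    return int(x * 3)

inner_fc = types.FunctionType(types.int64(types.float64))

@njit(types.int64(inner_fc))
def apply_fc(fn):
    return fn(123.4)

# apply_fc(triple) should return 370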
Example #4
def make_neural_network(layer_sizes,
                        layer_activations,
                        recurrent_layers,
                        learning_rate=0.01,
                        low=-2,
                        high=2):
    for size in layer_sizes:
        assert size > 0

    # Initialize typed layer sizes list.
    typed_layer_sizes = typed.List()
    for size in layer_sizes:
        typed_layer_sizes.append(size)

    # Initialize typed list of layer activation functions.
    prototype = types.FunctionType(types.float64[:, ::1](types.float64[:, ::1],
                                                         types.boolean))
    typed_layer_activations = typed.List.empty_list(prototype)
    for activation in layer_activations:
        typed_layer_activations.append(activation)

    # Initialize typed recurrent layers.
    typed_recurrent_layers = typed.List()
    for val in recurrent_layers:
        typed_recurrent_layers.append(val)

    # Initialize weights between every neuron in all adjacent layers.
    typed_weights = typed.List()
    for i in range(1, len(layer_sizes)):
        typed_weights.append(
            np.random.uniform(low, high, (layer_sizes[i - 1], layer_sizes[i])))

    # Initialize biases for every neuron in all layers
    typed_biases = typed.List()
    for i in range(1, len(layer_sizes)):
        typed_biases.append(np.random.uniform(low, high, (layer_sizes[i], 1)))

    # Initialize empty list of output of every neuron in all layers.
    typed_layer_outputs = typed.List()
    for i in range(len(layer_sizes)):
        typed_layer_outputs.append(np.zeros((layer_sizes[i], 1)))

    typed_learning_rate = learning_rate
    return NeuralNetwork(typed_layer_sizes, typed_layer_activations,
                         typed_recurrent_layers, typed_weights, typed_biases,
                         typed_layer_outputs, typed_learning_rate)
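
Note: the typed.List.empty_list(prototype) call is what ties this example to types.FunctionType: the prototype fixes the signature that every appended activation function must satisfy. A minimal self-contained sketch of that pattern, assuming recent Numba first-class function support; relu_like, double and apply_all are illustrative names:

from numba import njit, typed, types

@njit
def relu_like(x):
    return x if x > 0.0 else 0.0

@njit
def double(x):
    return 2.0 * x

# Prototype describing the common signature of the stored functions.
proto = types.FunctionType(types.float64(types.float64))

funcs = typed.List.empty_list(proto)
funcs.append(relu_like)
funcs.append(double)

@njit
def apply_all(fns, x):
    total = 0.0
    for fn in fns:
        total += fn(x)
    return total

# apply_all(funcs, -1.5) should return -3.0 (0.0 from relu_like, -3.0 from double)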
Example #5
    def test_basic2(self):
        """
        Test that a dispatcher object *without* a pre-compiled overload
        can be used as input to another function with locked-down signature
        """
        a = 1

        @njit
        def foo(x):
            return x + 1

        int_int_fc = types.FunctionType(types.int64(types.int64, ))

        @njit(types.int64(int_int_fc))
        def bar(fc):
            return fc(a)

        self.assertEqual(bar(foo), foo(a))
Example #6
    def test_basic3(self):
        """
        Test that a dispatcher object *without* a pre-compiled overload
        can be used as input to another function with locked-down signature and
        that it behaves as a truly generic function (foo1 does not get locked)
        """
        a = 1

        @njit
        def foo1(x):
            return x + 1

        @njit
        def foo2(x):
            return x + 2

        int_int_fc = types.FunctionType(types.int64(types.int64, ))

        @njit(types.int64(int_int_fc))
        def bar(fc):
            return fc(a)

        self.assertEqual(bar(foo1) + 1, bar(foo2))
Example #7
import numpy as np
from numba.experimental import jitclass
from numba import njit, types, typed, prange
import z_helper as h
import time

from numba.core.errors import NumbaTypeSafetyWarning
import warnings

warnings.simplefilter('ignore', category=NumbaTypeSafetyWarning)

spec = [
    ("layer_sizes", types.ListType(types.int64)),
    ("layer_activations",
     types.ListType(
         types.FunctionType(types.float64[:, ::1](types.float64[:, ::1],
                                                  types.boolean)))),
    ("weights", types.ListType(types.float64[:, ::1])),
    ("biases", types.ListType(types.float64[:, ::1])),
    ("layer_outputs", types.ListType(types.float64[:, ::1])),
    ("learning_rate", types.float64),
]


@jitclass(spec)
class NeuralNetwork:
    def __init__(self, layer_sizes, layer_activations, weights, biases,
                 layer_outputs, learning_rate):
        self.layer_sizes = layer_sizes
        self.layer_activations = layer_activations
        self.weights = weights
        self.biases = biases
        self.layer_outputs = layer_outputs
        self.learning_rate = learning_rate
Example #8
    #     new_pn = _cast_structref(BasePredicateNodeType, new_a)
    # else:
    #     b = _cast_structref(GenericBetaPredicateNodeType, pn)
    #     new_b = new(GenericBetaPredicateNodeType)
    #     new_b.filter_func = b.filter_func
    #     new_b.right_t_id = b.right_t_id
    #     new_b.right_facts = b.right_facts
        
    #     new_pn = _cast_structref(BasePredicateNodeType, new_b)

    
    return link_data


meminfo_type = types.MemInfoPointer(types.voidptr)
alpha_filter_func_type = types.FunctionType(i8[::1](meminfo_type, PredicateNodeLinkDataType, i8[::1], u1))
beta_filter_func_type = types.FunctionType(i8[:,::1](meminfo_type, PredicateNodeLinkDataType, i8[::1], i8[::1], u1))


#### Struct Definitions ####

base_predicate_node_field_dict = {
    #### Attributes filled in at definition time ###
    "id_str" : unicode_type,
    "is_alpha" : u1,
    
    "left_fact_type_name" : unicode_type,
    "right_fact_type_name" : unicode_type,

    
    "left_attr_offsets" : i8[::1],#types.Any,
Example #9

@njit(cache=True, fastmath=checks.fastmath, nogil=True)
def _find_edge_idx(node_edge_map: Dict, edge_data: np.ndarray,
                   start_nd_idx: int, end_nd_idx: int) -> int:
    """
    Finds an edge from start and end nodes
    """
    # iterate the start node's edges
    for edge_idx in node_edge_map[start_nd_idx]:
        # check whether the edge's out node matches the target node
        if edge_data[edge_idx, 1] == end_nd_idx:
            return edge_idx


node_close_func_proto = types.FunctionType(
    types.float64(types.float64, types.float64, types.float64, types.float64))


# node density
@njit(cache=True, fastmath=checks.fastmath, nogil=True)
def node_density(to_short_dist, to_simpl_dist, beta, cycles):
    return 1.0  # return float explicitly


# node farness
@njit(cache=True, fastmath=checks.fastmath, nogil=True)
def node_farness(to_short_dist, to_simpl_dist, beta, cycles):
    return to_short_dist


# node cycles
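
Note: node_close_func_proto is not applied within this snippet; it exists so that metric functions such as node_density and node_farness can be passed around as first-class values. A minimal sketch of how such a prototype can type a function-valued parameter; apply_metric and inverse_distance are illustrative names, not part of the library:

from numba import njit, types

metric_proto = types.FunctionType(
    types.float64(types.float64, types.float64, types.float64, types.float64))

@njit(types.float64(metric_proto, types.float64, types.float64,
                    types.float64, types.float64))
def apply_metric(metric_fn, to_short_dist, to_simpl_dist, beta, cycles):
    # defer to whichever metric function was passed in
    return metric_fn(to_short_dist, to_simpl_dist, beta, cycles)

@njit
def inverse_distance(to_short_dist, to_simpl_dist, beta, cycles):
    return 1.0 / to_short_dist

# apply_metric(inverse_distance, 5.0, 0.0, 0.0, 0.0) should return 0.2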
Example #10
    #upstream BaseSubscribers' meminfos that need to be updated before this can be.
    ("upstream", ListType(meminfo_type)),
    #The subscribers immediately downstream of this one.
    ("children", ListType(meminfo_type)),
    ("change_head", i8),
    ("grow_head", i8),
    ("change_queue", VectorType),  #ListType(u8)),
    ("grow_queue", VectorType),  #ListType(u8)),

    # #Indicies or idrecs of things that have changed in the subscriber's parent (i.e. last
    # #  upstream). The parent is responsible for filling this.
    # ("change_queue", VectorType),#ListType(u8)),
    # #Same as change_queue but for when the something has been added upstream
    # ("grow_queue", VectorType),#ListType(u8)),
    #An update function that updates state of the subscriber and pushes changes to all children.
    ("update_func", types.FunctionType(void(meminfo_type))),
    # #The t_id corresponding to the type to which this subscriber subscribes
    # ("t_id", i8)
    #
]

BASE_SUBSCRIBER_QUEUE_SIZE = 8

BaseSubscriber, BaseSubscriberType = define_structref("BaseSubscriber",
                                                      base_subscriber_fields)


@njit(cache=True)
def init_base_subscriber(bs):
    bs.kb_meminfo = None  #_meminfo_from_struct(kb)
    bs.upstream = List.empty_list(meminfo_type)