def generate_link_data(pn, kb):
    '''Takes a prototype predicate node and a knowledge base and returns
        the link data that binds that predicate node to the knowledge base
        (t_ids, fact lists, and change/grow queues).'''
    link_data = new(PredicateNodeLinkDataType)
    link_data.left_t_id = kb.context_data.fact_to_t_id[pn.left_fact_type_name]
    # print("Q2", link_data.left_t_id)
    link_data.left_facts = facts_for_t_id(kb.kb_data,i8(link_data.left_t_id)) 
    # print("Z")
    if(not pn.is_alpha):
        link_data.right_t_id = kb.context_data.fact_to_t_id[pn.right_fact_type_name]
        link_data.right_facts = facts_for_t_id(kb.kb_data,i8(link_data.right_t_id)) 
        link_data.left_consistency = np.empty((0,),dtype=np.uint8)
        link_data.right_consistency = np.empty((0,),dtype=np.uint8)
    else:
        link_data.right_t_id = -1

    # print("S")

    link_data.change_head = 0
    link_data.grow_head = 0
    link_data.change_queue = new_vector(8)
    link_data.grow_queue = new_vector(8)

    link_data.kb_grow_queue = kb.kb_data.grow_queue
    link_data.kb_change_queue = kb.kb_data.change_queue
    link_data.truth_values = np.empty((0,0),dtype=np.uint8)
        
    # print("DONE")

    # print(pn.is_alpha)
    # if(pn.is_alpha):
    #     a = _cast_structref(GenericAlphaPredicateNodeType, pn)
    #     new_a = new(GenericAlphaPredicateNodeType)
    #     new_a.filter_func = a.filter_func
    #     new_a.right_val = a.right_val

    #     new_pn = _cast_structref(BasePredicateNodeType, new_a)
    # else:
    #     b = _cast_structref(GenericBetaPredicateNodeType, pn)
    #     new_b = new(GenericBetaPredicateNodeType)
    #     new_b.filter_func = b.filter_func
    #     new_b.right_t_id = b.right_t_id
    #     new_b.right_facts = b.right_facts
        
    #     new_pn = _cast_structref(BasePredicateNodeType, new_b)

    
    return link_data
Example #2
def _struct_get_attr_offset(typingctx, inst, attr):
    '''Get the offset of the attribute 'attr' from the base address of the struct
        pointed to by structref 'inst'
    '''
    attr_literal = attr.literal_value

    def codegen(context, builder, sig, args):
        inst_type, _ = sig.args
        val, _ = args

        if (not isinstance(inst_type, types.StructRef)):
            #If we just got the type and not an instance, make a dummy instance
            inst_type = inst_type.instance_type
            val = context.make_helper(builder, inst_type)._getvalue()

        #Get the base address of the struct data
        utils = _Utils(context, builder, inst_type)
        baseptr = utils.get_data_pointer(val)
        baseptr = builder.ptrtoint(baseptr, cgutils.intp_t)

        #Get the address of member for 'attr'
        dataval = utils.get_data_struct(val)
        attrptr = dataval._get_ptr_by_name(attr_literal)
        attrptr = builder.ptrtoint(attrptr, cgutils.intp_t)

        #Subtract them to get the offset
        offset = builder.sub(attrptr, baseptr)
        return offset

    sig = i8(inst, attr)
    return sig, codegen
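
For intuition only: the same kind of offset can be inspected in plain Python with ctypes on a comparable (hypothetical) struct layout; this is just an analogue of the base-address subtraction the codegen above performs in LLVM.

import ctypes

class Pair(ctypes.Structure):
    # hypothetical layout: an int64 followed by a double
    _fields_ = [("a", ctypes.c_int64), ("b", ctypes.c_double)]

# ctypes exposes each member's offset from the struct base; here 'b' sits 8 bytes in
assert Pair.b.offset == 8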
def alpha_eval_truth(facts, f_id, pred_node):
    '''Evaluates an AlphaPredicateNode's predicate against the fact at f_id;
        returns 0xFF if the fact slot is empty.'''
    inst_ptr = facts.data[i8(f_id)]
    if(inst_ptr != 0):
        val = deref_attrs(pred_node.left_type, inst_ptr, pred_node.left_attr_offsets)
        return exec_op(pred_node.op_str, val, pred_node.right_val)
    else:
        return 0xFF
Example #4
def get_s64(buf, offset, length):
    if length < 8:
        return (0, offset, length)
    a = nb.i8(buf[offset + 7]) << 56
    b = nb.i8(buf[offset + 6]) << 48
    c = nb.i8(buf[offset + 5]) << 40
    d = nb.i8(buf[offset + 4]) << 32
    e = nb.i8(buf[offset + 3]) << 24
    f = nb.i8(buf[offset + 2]) << 16
    g = nb.i8(buf[offset + 1]) << 8
    h = nb.i8(buf[offset + 0]) << 0
    return a | b | c | d | e | f | g | h, offset + 8, length - 8
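
A quick plain-Python cross-check of the little-endian assembly above, using struct's '<q' format for a signed 64-bit value (hypothetical buffer contents):

import struct

buf = bytes([0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80])
# byte 7 (0x80) lands in bits 56..63, so the value is the most negative int64 plus 1
assert struct.unpack_from('<q', buf, 0)[0] == -9223372036854775807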
Example #5
def _pointer_to_data_pointer(typingctx, raw_ptr):
    def codegen(context, builder, sig, args):
        raw_ptr, = args
        raw_ptr_ty, = sig.args

        meminfo = builder.inttoptr(raw_ptr, cgutils.voidptr_t)
        data_ptr = context.nrt.meminfo_data(builder, meminfo)
        ret = builder.ptrtoint(data_ptr, cgutils.intp_t)

        return ret

    sig = i8(raw_ptr, )
    return sig, codegen
Example #6
def _struct_get_data_pointer(typingctx, inst_type):
    '''get the base address of the struct pointed to by structref 'inst' '''
    def codegen(context, builder, sig, args):
        val_ty, = sig.args
        val, = args

        utils = _Utils(context, builder, val_ty)
        dataptr = utils.get_data_pointer(val)
        ret = builder.ptrtoint(dataptr, cgutils.intp_t)
        return ret

    sig = i8(inst_type, )
    return sig, codegen
Example #7
def _pointer_from_struct(typingctx, val):
    def codegen(context, builder, sig, args):
        [td] = sig.args
        [d] = args

        ctor = cgutils.create_struct_proxy(td)
        dstruct = ctor(context, builder, value=d)
        meminfo = dstruct.meminfo

        res = builder.ptrtoint(dstruct.meminfo, cgutils.intp_t)

        return res

    sig = i8(val, )
    return sig, codegen
def _pointer_from_struct_incref(typingctx, val):
    def codegen(context, builder, sig, args):
        [td] = sig.args
        [d] = args

        ctor = cgutils.create_struct_proxy(td)
        dstruct = ctor(context, builder, value=d)
        meminfo = dstruct.meminfo

        #Incref to prevent struct from being freed
        context.nrt.incref(builder, types.MemInfoPointer(types.voidptr), meminfo)

        res = builder.ptrtoint(dstruct.meminfo, cgutils.intp_t)

        return res
        
    sig = i8(val,)
    return sig, codegen
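
Each of these helpers follows numba's intrinsic pattern: a function that takes the typing context plus argument types and returns a (signature, codegen) pair, normally wrapped with numba.extending.intrinsic so it can be called from jitted code. A minimal self-contained sketch of the same pattern using only public numba APIs (the name _load_i64 is hypothetical):

from numba import njit, types
from numba.extending import intrinsic
from numba.core import cgutils
import numpy as np

@intrinsic
def _load_i64(typingctx, addr):
    # dereference a raw address as an int64 (sketch only; no lifetime management)
    def codegen(context, builder, sig, args):
        ptr = builder.inttoptr(args[0], cgutils.intp_t.as_pointer())
        return builder.load(ptr)
    return types.int64(types.int64), codegen

@njit
def first_element(addr):
    return _load_i64(addr)

arr = np.arange(3, dtype=np.int64) + 10
# first_element(arr.ctypes.data) would return 10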
Example #9
from numba import jit, uint8, i8, vectorize
import numba as nb
import numpy as np
import datetime
import cv2
import array

url = "rtsp://*****:*****@192.168.254.6:554/Streaming/Channels/101"


@jit([nb.void(i8, i8)], nopython=True)
def add_with_vec(yy, c):
    print(yy, c)


@jit([nb.void(uint8[:, :, ::1])], forceobj=True)
def checkPic(frame):
    print(nb.typeof(frame))


add_with_vec(i8(10), i8(2))

video_capture = cv2.VideoCapture(url)
ret, frame = video_capture.read()

# print(nb.typeof(frame))

checkPic(frame)
Example #10
                splitidx -= 1
        score += btleaves[tree, splitidx - 7]
        if score < btleaves[tree, 8]:
            score = -50.
            break
    return score


"""
returns the samples with the top predictions for a single lidar sweep
TODO return detection scores as well
"""


@nb.njit(
    nb.i8(nb.f8[:, :], nb.i8[:], nb.f8[:, :, :, :], nb.i8[:, :, :],
          nb.f8[:, :], nb.f8[:, :], nb.b1[:, :, :, :], nb.f8[:], nb.i8))
def predictNegs(pts, tileidxs, groundTs, btsplits, btleaves, pts2suppress,
                detections, scores, detectioncount):
    gridshape = (anchornangles, localgridlen[0], localgridlen[1], anchorlen[2])
    grid = np.zeros(gridshape, dtype=np.bool8)

    nanchors_strided = len(anchorinlocalgrid_strided)
    ndetections = detections.shape[0]
    ntrees = btsplits.shape[0]

    pts2suppress_range = 2 + localgridlen * anchorstep[:2] / 2.
    centerpoint_grid = np.zeros(2, dtype=np.float64)

    roughgridshape = (localgridlen[0] // 3 + 1, localgridlen[1] // 3 + 1,
                      anchorlen[2] // 3 + 1)
    roughgrid = np.zeros(roughgridshape, dtype=np.bool8)
Example #11
    Parameters
    ----------
    q : i8[:]
        Ring buffer
    v : i8
        Element to insert
    """
    head, size = q[-2], q[-1]
    qmax = q.size - 2
    head = (head - 1) % qmax
    q[head] = v
    q[-2] = head
    q[-1] += 1
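
A plain-Python sketch of the layout assumed by the push above and the pop below (the push function's decorator and signature are cut off in this listing): the ring data lives in q[:-2], q[-2] holds the head index, q[-1] the element count, and a push writes one slot to the left of the head, wrapping around.

import numpy as np

capacity = 10
q = np.zeros(capacity + 2, dtype=np.int64)  # data slots plus head and size

def push_front_sketch(q, v):
    head, size = q[-2], q[-1]
    qmax = q.size - 2
    head = (head - 1) % qmax  # new head one slot to the left, wrapping
    q[head] = v
    q[-2] = head
    q[-1] = size + 1

push_front_sketch(q, 5)
push_front_sketch(q, 7)
# now q[9] == 5, q[8] == 7, head == 8, size == 2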


@njit(i8(i8[:]), cache=True)
def rb_pop(q: i8[:]) -> i8:
    """リングバッファの末尾から要素を削除します。

    Parameters
    ----------
    q : i8[:]
        リングバッファ

    Returns
    -------
    v : i8
        削除された末尾の要素
    """
    head, size = q[-2], q[-1]
    qmax = q.size - 2
Example #12
    c = c.flatten()
    elsbyvalue = np.argpartition(c, nvals)
    elsinorder = np.sort(elsbyvalue[:nvals])
    cp = np.searchsorted(elsinorder // n, np.arange(m + 1)).astype(np.int32)
    ci = (elsinorder % n).astype(np.int32)
    cx = c[elsinorder].astype(np.float64)
    cstruct = cs_di_sparse(c_int(nvals), c_int(m), c_int(n), byref(cp),
                           byref(ci), byref(cx), c_int(nvals))
    # have to return numpy arrays too, or they might get recycled
    return (cstruct, cp, ci, cx)


import numba as nb


@nb.njit(nb.i8(nb.i8[:, :], nb.b1[:, :], nb.i4[:, :, :], nb.i8[:, :], nb.i8))
def processOutput(matches, hypotheses, out_assocs, backward_index, n_matches):
    """
    Transforms the pairs found by the data association algorithm to a more usable
    format for tracking: a vector of matches and a binary matrix of associations.
    Usually it is also necessary to only keep a fixed number of matches.
    This version removes matches that are found after the limit has been hit,
    without considering the relative probabilities of existence.
    A serious tracker will probably want a better one - i.e. summing hypothesis
    scores for each match to estimate total probabilities of existence.
    """
    nm = 0
    nsols = out_assocs.shape[0]
    matches[:] = -1
    backward_index[:] = -1
    hypotheses[:] = False
Example #13
import sys
import numpy as np
from numba import njit, i8


@njit(i8(i8[:, :]), cache=True)
def solve(a):
    h, w = a.shape
    dp = np.zeros_like(a)
    dp[-1, -1] = a[-1, -1]
    if (h + w) & 1 == 0:
        dp[-1, -1] *= -1
    for i in range(h - 1, -1, -1):
        for j in range(w - 1, -1, -1):
            if i == h - 1 and j == w - 1:
                continue
            if (i + j) & 1 == 0:
                if j == w - 1:
                    dp[i, j] = dp[i + 1, j] - a[i, j]
                elif i == h - 1:
                    dp[i, j] = dp[i, j + 1] - a[i, j]
                else:
                    dp[i, j] = max(dp[i + 1, j], dp[i, j + 1]) - a[i, j]
            else:
                if j == w - 1:
                    dp[i, j] = dp[i + 1, j] + a[i, j]
                elif i == h - 1:
                    dp[i, j] = dp[i, j + 1] + a[i, j]
                else:
                    dp[i, j] = min(dp[i + 1, j], dp[i, j + 1]) + a[i, j]
    dp[0, 0] += a[0, 0]
Example #14
    limit_i = ((veh_length - cum_length) // 50) - 0
    while i < limit_i:
        ms_layout += '-'
        i += 1

    return ms_layout


def layout_to_int(vehicle_layout):
    layout_int = []
    for letter in vehicle_layout:
        layout_int.append(ord(letter))
    return np.array(layout_int, dtype=int)


@nb.njit(nb.i8(nb.int8, nb.int8))
def d_s(a, b):
    """
    d_s is a map from A × A -> R^+ and is called the Substitution Map. In
    particular, d_s(a, b) is the distance associated with substituting
    b for a, with a, b ∈ A. For all a ∈ A, d_s(a, a) is generally assigned the
    value zero, although this is not mandatory.

    """
    tilde = 126  # ~ 126
    hyphen = 45  # - 45
    star = 42  # * 42
    return (
        0 if a == b else
        0 if a == tilde and b == hyphen else  # '~' '-'
        0 if a == hyphen and b == tilde else  # '-' '~'
Example #15
        score = np.zeros(actions.shape[1])
        for i in range(actions.shape[1]):
            board[actions[0, i], actions[1, i]] = 1
            score[i] = getMove(-board, model, False, depth - 1)
            board[actions[0, i], actions[1, i]] = 0
        else:
            if flag:
                if np.max(score) == -2:
                    return getMove(board, model, True, depth=1)
                else:
                    return np.argmax(score)
            else:
                return -np.max(score)


@numba.jit(numba.i8(numba.i1[:, :], numba.b1, numba.i8), cache=True)
def Mate(board, flag, depth):
    '''board, flag, depth
    flag: whether this function returns a move index or a score'''
    if depth == 1:
        actions = np.array(np.where(board == 0))
        for i in range(actions.shape[1]):
            board[actions[0, i], actions[1, i]] = 1
            if winning(board.flatten()):
                board[actions[0, i], actions[1, i]] = 0
                if flag:
                    return i
                else:
                    return -1
            board[actions[0, i], actions[1, i]] = 0
        else:
Example #16
# -*- coding: utf-8 -*-
"""
last mod 5/14/19
"""
import numpy as np
import numba as nb
from time import time


@nb.njit(nb.i8(nb.b1[:, :, :, :], nb.i8[:], nb.i8, nb.i8, nb.i8[:]))
def orderIdxsBySplit(X, idxs, start, end, split):
    """
    partition array[start:end] so that split_directions==True are on left
    """
    j = start
    for i in range(start, end):
        xi = idxs[i]
        if np.any(X[xi, split[0]:split[3], split[1]:split[4],
                    split[2]:split[5]]):
            idxs[i] = idxs[j]
            idxs[j] = xi
            j += 1
    return j


@nb.njit(nb.f8(nb.f8, nb.f8, nb.f8, nb.f8))
def calcScore(gradin, hessin, gradsum, hesssum):
    gradout = gradsum - gradin
    hessout = hesssum - hessin
    return gradin * gradin / max(hessin, 1e-10) + gradout * gradout / max(
        hessout, 1e-10)
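
calcScore above is the core of the usual gradient-boosting split gain (the parent term and regularization are omitted, which does not change the ranking of splits within a node); the right-side gradient and hessian are obtained by subtracting the in-split sums from the totals. A quick arithmetic check with made-up numbers:

gradin, hessin, gradsum, hesssum = 1.0, 2.0, 3.0, 4.0
gradout, hessout = gradsum - gradin, hesssum - hessin
# left term 1/2 = 0.5, right term 4/2 = 2.0, so calcScore(1.0, 2.0, 3.0, 4.0) should give 2.5
assert gradin * gradin / hessin + gradout * gradout / hessout == 2.5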
Example #17
from __future__ import division
import numpy as np
import os.path
import numba

#print('load the precomputed values from ', os.path.join(os.path.dirname(__file__), 'Wigner_coefficients.npy'))
_Wigner_coefficients = np.load(os.path.join(os.path.dirname(__file__), 'Wigner_coefficients.npy'))

@numba.njit(numba.i8(numba.i8, numba.i8, numba.i8),
            cache=True, fastmath=True, nogil=True)
def _Wigner_index(twoj, twomp, twom):
    return twoj*((2*twoj + 3) * twoj + 1) // 6 + (twoj + twomp)//2 * (twoj + 1) + (twoj + twom) //2
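
The leading term of _Wigner_index is the running count of (m', m) pairs over all smaller twoj values, since each twoj contributes (twoj + 1)**2 entries; a small plain-Python check of that identity:

# sum of (t + 1)**2 for t < twoj equals twoj*((2*twoj + 3)*twoj + 1)//6
for twoj in range(8):
    assert sum((t + 1) ** 2 for t in range(twoj)) == twoj * ((2 * twoj + 3) * twoj + 1) // 6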

@numba.njit(numba.f8(numba.i8, numba.i8, numba.i8),
           cache=True, fastmath=True, nogil=True)
def _Wigner_coefficient(twoj, twomp, twom):
    return _Wigner_coefficients[_Wigner_index(twoj, twomp, twom)]

@numba.njit(numba.f8(numba.f8,numba.f8,numba.f8),
            cache=True, fastmath=True, nogil=True)
def Wigner_coefficient(j,mp,m):
    return _Wigner_coefficient(round(2*j), round(2*mp), round(2*m))

Example #18
from numba import jit, njit, i8
import numpy as np


@njit(i8(i8, i8, i8), cache=True)
def modpow(a, p, m):
  r = 1
  while p:
    if p & 1: r = r * a % m
    a = a * a % m
    p >>= 1
  return r
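
modpow is binary exponentiation modulo m; assuming the @njit signature above compiles, it should agree with Python's built-in three-argument pow:

assert modpow(3, 20, 998244353) == pow(3, 20, 998244353)
assert modpow(5, 0, 7) == 1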


@njit(i8[:](i8[:], i8[:]), cache=True)
def ntt(s, t):
  mod = 998244353
  pr = 5
  sl, tl = len(s), len(t)
  k = 1
  M = 2
  while M < sl + tl - 1:
    k += 1
    M *= 2
  w = np.zeros(M // 2, np.int64)
  y = np.zeros(M // 2, np.int64)

  def init():
    nonlocal w, y, M, mod, pr
    z = modpow(pr, (mod - 1) // M, mod)
    x = modpow(z, mod - 2, mod)
Example #19
from . import values as vl
import time
import os
import numpy as np
from numba import jit, f8, i8, b1, void


# Tension-check function (F = kΔx -> σ = EA/l Δx = E/l Δx, so the current mass-point coordinates should be all that's needed; also gets the operating distance)
@jit(i8(f8[:, :]))
def calctension(vectors):
    ret = 0
    for i in range(1, len(vectors) - 1):
        if (vectors[i, 4] == 2):
            l_0 = vl.l_0_short
        else:
            l_0 = vl.l_0
        rM = ((vectors[i, 0] - vectors[i - 1, 0])**2 +
              (vectors[i, 1] - vectors[i - 1, 1])**2)**(1 / 2)
        s = vl.E / vl.l_0 * max(rM - l_0, 0)
        if (abs(s) > vl.sigma_par_p):
            ret = 1
            break
        elif (abs(s) < vl.sigma_par_m):
            ret = -1
            break
    return ret
Example #20
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
last mod 3/7/19
"""

import numba as nb


@nb.jit(nb.i8(nb.i8[:, :], nb.b1[:], nb.i8[:], nb.i8[:], nb.i8[:, :], nb.i8))
def processOutput(matches, hypothesis, x, y, backward_index, n_matches):
    """
    This one removes matches that are found after the limit has been hit,
    without considering the relative importance of each
    keeps all hypotheses
    """
    for i, j in enumerate(x):
        if j == -2: continue
        backidx = backward_index[i, j]
        if backidx == -1:
            if n_matches == matches.shape[0]:
                continue
            backward_index[i, j] = n_matches
            matches[n_matches] = (i, j)
            backidx = n_matches
            n_matches += 1
        hypothesis[backidx] = True
    for j, i in enumerate(y):
        if i == -1:
            backidx = backward_index[-1, j]
            if backidx == -1:
Example #21
    limit_i = ((veh_length - cum_length) // 50) - 0
    while i < limit_i:
        ms_layout += '-'
        i += 1

    return ms_layout


def layout_to_int(vehicle_layout):
    layout_int = []
    for letter in vehicle_layout:
        layout_int.append(ord(letter))
    return np.array(layout_int, dtype=int)


@nb.njit(nb.i8(nb.int8, nb.int8))
def d_s(a, b):
    """
    d_s is a map from A × A -> R^+ and is called the Substitution Map. In
    particular, d_s(a, b) is the distance associated with substituting
    b for a, with a, b ∈ A. For all a ∈ A, d_s(a, a) is generally assigned the
    value zero, although this is not mandatory.

    """
    tilde = 126  # ~ 126
    hyphen = 45  # - 45
    star = 42  # * 42
    return (0 if a == b else 0 if a == tilde and b == hyphen else  # '~' '-'
            0 if a == hyphen and b == tilde else  # '-' '~'
            0 if a == star and b == hyphen else  # '*' '-'
            0 if a == hyphen and b == star else  # '-' '*'
Example #22
    # print b.view()
    # msg_bytearray = bytearray(msg.data)
    # index = 0
    # for i in msg.data:
	    # b[index] = msg_bytearray[index]
	    # index = index + 1
    return b


def isSignalSignedType(signal):
    if signal._valuetype == '+':
        return False
    else:
        return True

@njit(numba.i8(numba.u8,numba.u1))
def twosComplement(number, signalsize):
    return (number - (1 << signalsize))
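
A plain-Python check of the two's-complement formula above (it assumes the caller already knows the sign bit of the signalsize-bit field is set):

assert 0xFF - (1 << 8) == -1           # 8-bit 0xFF -> -1
assert 0x8000 - (1 << 16) == -32768    # 16-bit 0x8000 -> most negative value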

@njit(numba.u1(numba.u1))
def getArrayIdxFromStartBit(n):
    return (0 if (n+1)%8 == 0 else 8-((n+1)%8)) + (n//8)*8

def getFactorIsIntegerFromSignal(signal):
    return True if float(signal._factor).is_integer() else False

def getOffsetIsIntegerFromSignal(signal):
    return True if float(signal._offset).is_integer() else False

# @jit
def getSignalNumber(barray_unpacked, barray, start_bit, signalsize, isByteorderIntel, isValuetypeiSigned, factor, offset):
Example #23
import sys
import numpy as np
import numba

sys.setrecursionlimit(10**8)
read = sys.stdin.buffer.read
readline = sys.stdin.buffer.readline
readlines = sys.stdin.buffer.readlines

H, W = map(int, readline().split())
grid = np.zeros((H, W), dtype=np.int64)
for i in range(H):
    s = readline().decode("utf-8").rstrip()
    grid[i, :] = [c == "." for c in s]


@numba.njit(numba.i8(numba.i4, numba.i4, numba.typeof(grid)), cache=False)
def solve(H, W, grid):
    M = 10**9 + 7
    t = grid[0, :].copy()
    for c in range(1, W):
        t[c] = min(t[c], t[c - 1])
    for r in range(1, H):
        mask = grid[r, :]
        t[0] *= mask[0]
        for c in range(1, W):
            t[c] += t[c - 1]
            t[c] *= mask[c]
            t[c] %= M
    return t[-1]
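
A hypothetical quick check of solve, kept as comments since this module reads the real grid from stdin at import time: a 2x2 all-open grid has exactly two monotone paths from the top-left to the bottom-right cell.

# demo = np.ones((2, 2), dtype=np.int64)
# solve(2, 2, demo)  # expected result: 2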

Example #24
import numpy as np
import numba
import multiprocessing as mp
import time
from main import *
# NxN bang
# M moku
# board 0: blank, 1: white, -1: black


@numba.jit(numba.i8(numba.i1[:, :], numba.f8[:], numba.b1, numba.i8))
def getMove(board, weights, flag, depth):
    'flag: whether this function returns a move index or a score'
    if depth == 1:
        actions = np.array(np.where(board == 0))
        features = getFeatures(board, actions)
        if flag:
            return np.argmax(weights.dot(features.transpose()))
        else:
            return -np.max(weights.dot(features.transpose()))
    else:
        actions = np.array(np.where(board == 0))
        score = np.zeros(actions.shape[1])
        for i in range(actions.shape[1]):
            nextboard = board.copy()
            nextboard[actions[0, i], actions[1, i]] = 1
            nextboard = -nextboard
            score[i] = getMove(nextboard, weights, False, depth - 1)
        else:
            if flag:
                return np.argmax(score)
Example #25
def retract_by_idrec(kb, idrec):
    t_id, f_id, a_id = decode_idrec(idrec)  #negligible
    make_f_id_empty(kb.kb_data, i8(t_id), i8(f_id))  #3.6ms
    kb.kb_data.change_queue.add(idrec)