Example #1
def _masked_array_type_from_col(col):
    """
    Return a type representing a tuple of arrays,
    the first element an array of the numba type
    corresponding to `dtype`, and the second an
    array of bools representing a mask.
    """
    nb_scalar_ty = numpy_support.from_dtype(col.dtype)
    if col.mask is None:
        return nb_scalar_ty[::1]
    else:
        return Tuple((nb_scalar_ty[::1], libcudf_bitmask_type[::1]))
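A minimal sketch (not from the source) of the kind of type this helper produces, assuming a plain uint8 mask in place of the real libcudf_bitmask_type:

import numpy as np
from numba.types import Tuple, uint8
from numba.np import numpy_support

nb_scalar_ty = numpy_support.from_dtype(np.dtype("int64"))
masked_ty = Tuple((nb_scalar_ty[::1], uint8[::1]))
print(masked_ty)  # -> Tuple(array(int64, 1d, C), array(uint8, 1d, C))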
Example #2
def _construct_signature(frame, return_type, args):
    """
    Build the signature of numba types that will be used to
    actually JIT the kernel itself later, accounting for types
    and offsets. Skips columns with unsupported dtypes.
    """

    # Tuple of arrays, first the output data array, then the mask
    return_type = Tuple((return_type[::1], boolean[::1]))
    offsets = []
    sig = [return_type, int64]
    for col in _supported_cols_from_frame(frame).values():
        sig.append(_masked_array_type_from_col(col))
        offsets.append(int64)

    # return_type, size, data, masks, offsets, extra args
    sig = void(*(sig + offsets + [typeof(arg) for arg in args]))

    return sig
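For illustration only (not from the source): the signature this builder would produce for a frame with a single supported int64 column and a float64 return type, with the mask again typed as uint8 for the sketch:

from numba.types import Tuple, boolean, float64, int64, uint8, void

return_type = Tuple((float64[::1], boolean[::1]))  # output data + output mask
masked_col = Tuple((int64[::1], uint8[::1]))       # column data + column mask
sig = void(return_type, int64, masked_col, int64)  # return, size, col, offset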
Example #3
def create_signature(retty, args):
    """
    Given the return type and arguments for a libdevice function, return the
    signature of the stub function used to call it from CUDA Python.
    """

    # Any pointer arguments should be part of the return type.
    return_types = [arg.ty for arg in args if arg.is_ptr]
    # If the return type is void, there is no point adding it to the list of
    # return types.
    if retty != void:
        return_types.insert(0, retty)

    if len(return_types) > 1:
        retty = Tuple(return_types)
    else:
        retty = return_types[0]

    argtypes = [arg.ty for arg in args if not arg.is_ptr]

    return signature(retty, *argtypes)
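A hypothetical call (not from the source), mimicking a libdevice routine like __nv_sincosf whose two pointer arguments become the stub's return values. Arg is a stand-in for the real argument descriptor, and the snippet assumes signature, Tuple and void are imported from numba.core.typing and numba.types as the function above requires:

from collections import namedtuple
from numba.types import float32

Arg = namedtuple("Arg", ["ty", "is_ptr"])
sig = create_signature(void, [Arg(float32, False), Arg(float32, True), Arg(float32, True)])
print(sig)  # (float32,) -> UniTuple(float32 x 2)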
Example #4
from numba import njit, int64, float64
from numba.typed import List as L
from numba.types import Tuple, ListType as LT
import numpy as np


@njit(Tuple((int64[:, ::1], LT(LT(int64)), LT(LT(int64))))(int64[:, ::1], int64, int64), cache=True)
def compute_surface_mesh_adjs(edges, num_vertices, edges_per_face):

    num_faces = edges.shape[0] // edges_per_face
    adjs = np.zeros((num_faces, edges_per_face), dtype=np.int64) - 1
    vtx2vtx = L()
    vtx2face = L()
        
    for k in range(num_vertices):
        tmp1 = L()
        tmp2 = L()
        tmp1.append(-1)
        tmp2.append(-1)
        vtx2vtx.append(tmp1)
        vtx2face.append(tmp2)
        
    tmp = np.arange(num_faces)
    faces_idx = np.repeat(tmp, edges_per_face)
        
    map_ = dict()
    support_set = set()
    map_[(-1,-1)] = -1
    support_set.add((-1,-1))
        
    for i in range(edges.shape[0]):
Example #5
"Multicopter dynamics"

import numpy as np
import numba as nb

from .. import quat
from ..utis import env_param, polyval, cross

# Common type definitions
from numba.types import Tuple, none
StateT = nb.float64[::1]
ActionT = nb.float64[::1]
TimeDeltaT = nb.float64
StateArrayT = nb.float64[:, ::1]
ActionArrayT = nb.float64[:, ::1]
DerivativesT = Tuple((StateArrayT, ActionArrayT))

eps = 1e-9
radius = 0.2  # m
mass = 1.3  # kg
num_rotors = 4
rpm_max = 20000
rotor_rpm_coeff = 6e1
friction_force = 1e-2
friction_torque = 1e-5
gravity = np.r_[0.0, 0.0, 9.82]

# I = mk^2 where k is the radius of gyration
inertia = mass * (0.275)**2
arm_length = radius
rotor_angles = 2 * np.pi * (np.r_[0:num_rotors] - 1 / 2) / num_rotors
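A hypothetical use of the aliases above (not from the source): a trivially typed function, just to show DerivativesT in an eager njit signature.

@nb.njit(DerivativesT(StateArrayT, ActionArrayT))
def passthrough_derivatives(states, actions):
    # Identity placeholder standing in for the real multicopter dynamics.
    return states.copy(), actions.copy()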
Example #6
        samples in each label class

    Returns
    -------
    output : float
        Gini impurity criterion in the node
    """
    y_sum_sq = 0.0
    w_samples_sq = w_samples * w_samples
    for k in range(n_classes):
        y_sum_sq += y_sum[k] * y_sum[k]
    return 1.0 - y_sum_sq / w_samples_sq


@jit(
    Tuple((float32, float32))(float32, float32, float32, float32[::1], float32[::1]),
    nopython=NOPYTHON,
    nogil=NOGIL,
    boundscheck=BOUNDSCHECK,
    fastmath=FASTMATH,
    locals={
        "y_sum_left_sq": float32,
        "y_sum_right_sq": float32,
        "w_samples_left_sq": float32,
        "w_samples_right_sq": float32,
        "gini_left": float32,
        "gini_right": float32,
    },
)
def gini_childs(n_classes, w_samples_left, w_samples_right, y_sum_left, y_sum_right):
    """Computes the gini impurity criterion in both left and right child nodes of a
Example #7
from pandas.core.arrays import ExtensionArray, ExtensionOpsMixin
from collections.abc import Iterable
from pandas.api.extensions import ExtensionDtype, take
from pandas.compat import set_function_name
from pandas.core.dtypes.generic import ABCSeries, ABCIndexClass
from pandas.core import ops
from pandas.core.dtypes.dtypes import registry
from pandas.core.dtypes.common import is_list_like, is_integer_dtype
import sparse
import operator
import numpy as np
from numba import jit
from numba.types import float64, Tuple, int64, void


@jit(Tuple((int64[:, :], float64[:], int64))(int64[:], float64[:], int64,
                                             int64, float64),
     parallel=True,
     nogil=True,
     nopython=True)
#@jit(parallel=True, nogil=True, nopython=True)
def _setitem(coords, data, shape, key, value):
    # TODO: This seems like a very slow way to do it
    data_iter = 0
    key_copied = False
    if value == 0.0:
        return np.expand_dims(coords, 0), data, shape
    newlen = data.shape[0] + 1
    new_coords = np.empty((1, newlen), dtype=np.int64)
    new_data = np.empty(newlen)
    for i in range(newlen):
        if data_iter >= 0:
Example #8
        # Compute the best bin and gain proxy obtained for the feature
        find_best_split_along_feature(tree_context, node_context, feature, f,
                                      candidate_split)
        # If we found a candidate split along the feature
        if candidate_split.found_split:
            # And if it's better than the current one
            if candidate_split.gain_proxy >= best_gain_proxy:
                # Then we replace the best current split
                copy_split(candidate_split, best_split)
                best_gain_proxy = candidate_split.gain_proxy
        f += 1


@jit(
    [
        Tuple((uintp, uintp))(TreeClassifierContextType, SplitClassifierType,
                              uintp, uintp, uintp, uintp),
        Tuple((uintp, uintp))(TreeRegressorContextType, SplitRegressorType,
                              uintp, uintp, uintp, uintp),
    ],
    nopython=True,
    nogil=True,
    locals={
        "feature": uintp,
        "bin_threshold": uint8,
        "Xf": uint8[:],
        "left_buffer": uintp[::1],
        "right_buffer": uintp[::1],
        "partition_train": uintp[::1],
        "partition_valid": uintp[::1],
        "n_samples_train_left": uintp,
        "n_samples_train_right": uintp,
Example #9
from cre.utils import (_struct_from_meminfo, _meminfo_from_struct, _cast_structref,
                       cast_structref, decode_idrec, lower_getattr,
                       _struct_from_pointer, lower_setattr, _pointer_from_struct)
from cre.caching import gen_import_str, unique_hash, import_from_cached, source_to_cache, source_in_cache
from cre.condition_node import Conditions, ConditionsType, initialize_conditions

from cre.var import *
from cre.predicate_node import GenericAlphaPredicateNodeType, GenericBetaPredicateNodeType

from operator import itemgetter
from copy import copy

i8_arr = i8[::1]
i8_arr_2d = i8[:, ::1]
list_i8_arr = ListType(i8_arr)
# list_i8_arr_2d = ListType(optional(i8_arr_2d))

i8_i8_arr_tuple = Tuple((i8, i8[:, ::1]))
list_i8_i8_arr_tuple = ListType(i8_i8_arr_tuple)

i8_x2 = UniTuple(i8, 2)


@njit(cache=True)
def filter_alpha(term, inds):
    png = cast_structref(GenericAlphaPredicateNodeType, term.pred_node)
    mi = _meminfo_from_struct(png)
    return png.filter_func(mi, term.link_data, inds, term.negated)


@njit(cache=True)
def filter_beta(term, l_inds, r_inds):
    png = cast_structref(GenericBetaPredicateNodeType, term.pred_node)
Example #10
r"""Distance functions for potentials.

Some of the functions also compute rotational moments for computing torque like

.. math::
   \mathbf{M} = \mathbf{r}_{\mathrm{moment}} \times (\mathbf{f}_{}^{soc} + \mathbf{f}_{}^{c})

"""
import numba
import numpy as np
from numba import float64
from numba.types import Tuple, UniTuple
from crowddynamics.core.vector2D import length, rotate90, dot


@numba.jit([Tuple((float64, float64[:]))(float64[:], float64,
                                         float64[:], float64)],
           nopython=True, nogil=True, cache=True)
def distance_circles(x0, r0, x1, r1):
    r"""
    Skin-to-Skin distance :math:`h`  with normal :math:`\mathbf{\hat{n}}`
    between two circles.

    .. math::

       h &= \|\mathbf{x}_0 - \mathbf{x}_1\| - (r_0 + r_1) \\
       \mathbf{\hat{n}} &= \frac{\mathbf{x}_0 - \mathbf{x}_1}{\|\mathbf{x}_0 - \mathbf{x}_1\|}

    Args:
        x0 (numpy.ndarray):
        r0 (float):
        x1 (numpy.ndarray):
Example #11
    count = tree.nodes.counts[idx_node, c]
    return n_samples == count


@njit(uint32(TreeClassifier.class_type.instance_type, uint32, float32[::1]))
def node_get_child(tree, idx_node, x_t):
    feature = tree.nodes.feature[idx_node]
    threshold = tree.nodes.threshold[idx_node]
    if x_t[feature] <= threshold:
        return tree.nodes.left[idx_node]
    else:
        return tree.nodes.right[idx_node]


@njit(
    Tuple((float32, float32))(TreeClassifier.class_type.instance_type, uint32,
                              uint32))
def node_range(tree, idx_node, j):
    # TODO: do the version without memory...
    if tree.nodes.n_samples[idx_node] == 0:
        raise RuntimeError("Node has no range since it has no samples")
    else:
        return (
            tree.nodes.memory_range_min[idx_node, j],
            tree.nodes.memory_range_max[idx_node, j],
        )


@njit(
    float32(TreeClassifier.class_type.instance_type, uint32, float32[::1],
            float32[::1]))
def node_compute_range_extension(tree, idx_node, x_t, extensions):
Example #12
    # Using loops since the numpy functions such as `numpy.isin` and `numpy.intersect1d` are not supported by numba
    s_xy = 0.
    for i in x:
        for j in y:
            if i == j:
                s_xy += 1.
                break

    cs = s_xy / ((s_x * s_y) ** 0.5)
    # Clip values to the range `[-1, 1]`, the domain of arc-cosine
    dist = np.arccos(max(-1., min(1., cs)))

    return dist


@njit(Tuple((int64[:, :], float64[:, :]))(int64[:, :], float64[:, :]))
def remove_self_neighbors(index_neighbors_, distance_neighbors_):
    """
    Given the index and distances of k nearest neighbors of a list of query points, remove points from their
    own neighbor list.

    :param index_neighbors_: numpy array of the index of `k` neighbors for a list of points. Has shape `(n, k)`,
                             where `n` is the number of query points.
    :param distance_neighbors_: numpy array of the distance of `k` neighbors for a list of points.
                                Also has shape `(n, k)`.

    :return: (index_neighbors, distance_neighbors), where each of them has shape `(n, k - 1)`.
    """
    n, k = index_neighbors_.shape
    index_neighbors = np.zeros((n, k - 1), dtype=index_neighbors_.dtype)
    distance_neighbors = np.zeros((n, k - 1), dtype=distance_neighbors_.dtype)
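    # Hedged completion of the truncated body (not from the source): keep, in
    # order, the first k - 1 neighbors in each row that are not the query
    # point itself.
    for i in range(n):
        pos = 0
        for j in range(k):
            if index_neighbors_[i, j] != i and pos < k - 1:
                index_neighbors[i, pos] = index_neighbors_[i, j]
                distance_neighbors[i, pos] = distance_neighbors_[i, j]
                pos += 1
    return index_neighbors, distance_neighbors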
Example #13
    """

    x_0 = np.roll(img, -1, axis=1).T
    x_1 = np.roll(img, 1, axis=1).T
    y_0 = np.roll(img, -1, axis=0).T
    y_1 = np.roll(img, 1, axis=0).T

    # we do a lot of transposing before and after here because sums in the
    # energy function happen along the first dimension by default when we
    # want them to be happening along the last (summing the colors)
    en_map = sum(pow((x_0 - x_1), 2) + pow((y_0 - y_1), 2)).T

    return en_map


@njit(Tuple((Array(int64, 2, 'A'), int64))(Array(uint8, 2, 'A')))
def cumulative_energy(energy):
    """
    https://en.wikipedia.org/wiki/Seam_carving#Dynamic_programming

    Parameters
    ==========
    energy: 2-D numpy.array(uint8)
        Produced by energy_map

    Returns
    =======
        tuple of 2 2-D numpy.array(int64) with shape (height, width).
        paths has the x-offset of the previous seam element for each pixel.
        path_energies has the cumulative energy at each pixel.
    """
Example #14
    Since numba's jitclass support for class inheritance is nonexistent, we use this
    function both in the constructors of SplitClassifier and SplitRegressor.

    Parameters
    ----------
    split : SplitClassifier or SplitRegressor
        The split to be initialized by this function
    """
    split.bins = np.empty(max_n_bins, dtype=np.uint64)
    split.bin_partition = np.empty(max_n_bins, dtype=np.uint64)
    reset_split(split)


@jit(
    Tuple((float32, float32))(
        uint64, float32, float32, float32, float32[::1], float32[::1]
    ),
    nopython=NOPYTHON,
    nogil=NOGIL,
    boundscheck=BOUNDSCHECK,
    fastmath=FASTMATH,
    locals={},
)
def apply_childs_impurity_function_clf(
    criterion,
    n_classes,
    w_samples_train_left,
    w_samples_train_right,
    y_sum_left,
    y_sum_right,
):
Example #15
    for each_node in top_layer:
        for e in graph.indices[graph.indptr[each_node]:graph.indptr[each_node + 1]]:
            directConnectionScore[e] += 1
    directConnectionScore = np.log(directConnectionScore + 2)

    # get node attribute score
    beta = 0.25
    node_score = beta * np.log10(np.max(graph.node_attr)/graph.node_attr)
    
              
    contentscore = node_score + directConnectionScore

    return contentscore


@njit(Tuple((int32, float32))(DecSparseGraphSpec(), int32[:]), nogil=True)
def vertex_nomination__kernel__(G, seeds):
    
    # context score : computed via distance map from multi BFS
    bfs_results = multi_bfs(G, seeds)
    context_sim = 1/bfs_results

    # content score : placeholder computed via a random number generator
    content_sim = getContentScore(G, seeds)

    # fusion score : content_sim * context_sim 
    fusion_score = np.multiply(content_sim, context_sim)

    # remove original seeds from ranking
    for seed in seeds:
        fusion_score[seed] = np.float64(-1)
Example #16
    Parameters
    ----------
    records : Records
        A records dataclass containing the stack of node records

    Returns
    -------
    output : bool
        Returns True if there are remaining records in the stack, False otherwise
    """
    return records.top > 0


@jit(
    Tuple((intp, uintp, boolean, float32, uintp, uintp, uintp, uintp))(RecordsType),
    nopython=NOPYTHON,
    nogil=NOGIL,
    boundscheck=BOUNDSCHECK,
    fastmath=FASTMATH,
    locals={"stack_top": record_type},
)
def pop_node_record(records):
    """Pops (removes and returns) a node record from the stack of records.

    Parameters
    ----------
    records : Records
        A records dataclass containing the stack of node records

    Returns
Example #17
    s_len = 2**(l_max + 1)
    s = np.zeros(c.shape[:-1] + (s_len, ), dtype=c.dtype)

    pos_sums = np.arange(l_max, dtype=np.int64)
    pos_sums[:] = 2**(l_max - pos_sums)
    pos_sums = np.cumsum(pos_sums)

    gamma = np.zeros_like(c)

    return (x, y, ix, iy, vx, vy, unbiased, iy_reord, c, sx_c, sy_c, c_sum,
            l_max, s, pos_sums, gamma)


_get_impl_args_compiled = numba.njit(Tuple(
    (input_array, input_array, int64[:], int64[:], float64[:], float64[:],
     boolean, int64[:], float64[:, :], float64[:, :], float64[:, :],
     float64[:], int64, float64[:, :], int64[:], float64[:, :]))(input_array,
                                                                 input_array,
                                                                 boolean),
                                     cache=True)(_get_impl_args)

impls_dict = {
    CompileMode.AUTO:
    ((_get_impl_args_compiled, _distance_covariance_sqr_avl_impl_compiled),
     (_get_impl_args, _distance_covariance_sqr_avl_impl)),
    CompileMode.NO_COMPILE:
    ((_get_impl_args, _distance_covariance_sqr_avl_impl), ),
    CompileMode.COMPILE_CPU:
    ((_get_impl_args_compiled, _distance_covariance_sqr_avl_impl_compiled), )
}
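The pattern above, numba.njit(signature, cache=True)(func), eagerly compiles an already-defined Python function. A self-contained sketch of the same idiom (not from the source):

import numba
from numba.types import Tuple, int64

def _swap(a, b):
    return b, a

_swap_compiled = numba.njit(Tuple((int64, int64))(int64, int64), cache=True)(_swap)
assert _swap_compiled(1, 2) == (2, 1)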

Example #18
File: utils.py Project: tartaruszen/abda
 Tuple((float64[:, :, :], int64, int64))(
     #   D   N      W               X
     int64,
     int64,
     float64[:, :],
     float64[:, :],
     #   C           R    kest    Z
     uint8[:],
     int64[:],
     int64,
     float64[:, :, :],
     #   Breal           Bint                Bpos                Bcat                Bord
     float64[:, :, :],
     float64[:, :, :],
     float64[:, :, :],
     float64[:, :, :, :],
     float64[:, :, :],
     #   Bcount          u       wint    theta           thetal      thetah
     float64[:, :, :],
     float64[:, :, :],
     int64,
     float64[:, :],
     float64[:],
     float64[:],
     #   maxX        meanX    s2y      s2u      syd      n_samples
     float64[:],
     float64[:],
     float64,
     float64,
     float64,
     int64),
Example #19
import numpy as np
from numba import jit
from numba.types import void, Tuple, int32, boolean, float32
from . import utils
from .planar_graph import PlanarGraph, planar_graph_nb_type


@jit(Tuple((boolean[:], boolean[:], int32[:]))(planar_graph_nb_type, int32[:],
                                               int32),
     nopython=True)
def get_tree_cycle_masks_and_next_edge_indices_in_path_to_cycle(
        graph, parent_edge_indices, non_tree_edge_index):

    cycle_vertices_mask = utils.repeat_bool(False, graph.size)
    cycle_edges_mask = utils.repeat_bool(False, graph.edges_count)

    next_edge_indices_in_path_to_cycle = utils.repeat_int(-1, graph.size)

    non_tree_edge_vertex1 = graph.edges.vertex1[non_tree_edge_index]
    non_tree_edge_vertex2 = graph.edges.vertex2[non_tree_edge_index]

    cycle_vertices_mask[non_tree_edge_vertex1] = True
    cycle_vertices_mask[non_tree_edge_vertex2] = True

    no_vertices_overlapped_yet = True

    for start_vertex in [non_tree_edge_vertex1, non_tree_edge_vertex2]:

        current_vertex = start_vertex
        current_edge_index = parent_edge_indices[current_vertex]
Example #20
    chunk = 1
    elif n_cols % num_threads > 0:
        chunk = (n_cols // num_threads) + 1
    else:
        chunk = (n_cols // num_threads)

    for i in range(n_rows):
        for j in range((tid) * chunk, min(chunk * (tid + 1), n_cols)):
            tmp = 0.0
            for k in range(A.indptr[j], A.indptr[j + 1]):
                tmp += B[i, A.indices[k]] * A.data[k]
            C[i, j] = tmp
            #C[:,i] = np.dot(,A.data[A.indptr[i]:A.indptr[i+1]])


@njit(Tuple((int64[:], int64[:], int64[:]))(int64[:], int64[:]),
      nogil=True,
      pipeline_class=DEC_Pipeline)
def list_intersection(list1, list2):
    len1 = len(list1)
    len2 = len(list2)

    x = np.zeros(max(len1, len2), dtype=np.int64)
    xidx = np.zeros(max(len1, len2), dtype=np.int64)
    xjdx = np.zeros(max(len1, len2), dtype=np.int64)
    i, j, lenx = 0, 0, 0
    while (i < len1) and (j < len2):
        if list1[i] == list2[j]:
            x[lenx] = list1[i]
            xidx[lenx] = i
            xjdx[lenx] = j
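            # Hedged completion of the truncated loop (not from the source):
            # record the match and advance both cursors; otherwise advance the
            # cursor pointing at the smaller value.
            lenx += 1
            i += 1
            j += 1
        elif list1[i] < list2[j]:
            i += 1
        else:
            j += 1
    return x[:lenx], xidx[:lenx], xjdx[:lenx]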
Example #21
File: utils.py Project: DannyWeitekamp/ILP
#COPIED FROM NUMBERT EXPERIMENTAL BRANCH

from numba import types, njit, u1, u2, u4, u8, i8, i2, literally
from numba.types import Tuple, void
from numba.experimental.structref import _Utils, imputils
from numba.extending import intrinsic
from numba.core import cgutils
from llvmlite.ir import types as ll_types

#### idrec encoding ####


@njit(Tuple([u2, u8, u1])(u8), cache=True)
def decode_idrec(idrec):
    t_id = idrec >> 48
    f_id = (idrec >> 8) & 0x000FFFFF
    a_id = idrec & 0xF
    return (t_id, f_id, a_id)


@njit(u8(u2, u8, u1), cache=True)
def encode_idrec(t_id, f_id, a_id):
    return (t_id << 48) | (f_id << 8) | a_id
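A quick round-trip check (not from the source); it holds as long as f_id fits in the 20 bits and a_id in the 4 bits that decode_idrec extracts:

assert decode_idrec(encode_idrec(7, 1234, 3)) == (7, 1234, 3)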


meminfo_type = types.MemInfoPointer(types.voidptr)


@intrinsic
def lower_setattr(typingctx, inst_type, attr_type, val_type):
    if (isinstance(attr_type, types.Literal)
Example #22
File: _split.py Project: imerad/wildwood
        # If we found a candidate split along the feature
        if candidate_split.found_split:
            # And if it's better than the current one
            if candidate_split.gain_proxy >= best_gain_proxy:
                # Then we replace the best current split
                copy_split(candidate_split, best_split)
                best_gain_proxy = candidate_split.gain_proxy
        f += 1

    # TODO: Compute the true information gain and save it somewhere ? But it's only
    #  useful for root ?
    return best_split


@jit(
    Tuple((uintp, uintp))(TreeContextType, SplitType, uintp, uintp, uintp, uintp),
    nopython=True,
    nogil=True,
    locals={
        "feature": uintp,
        "bin_threshold": uint8,
        "Xf": uint8[::1],
        "left_buffer": uintp[::1],
        "right_buffer": uintp[::1],
        "partition_train": uintp[::1],
        "partition_valid": uintp[::1],
        "n_samples_train_left": uintp,
        "n_samples_train_right": uintp,
        "n_samples_valid_left": uintp,
        "n_samples_valid_right": uintp,
        "pos_train": uintp,
Example #23
import numpy as np
import sys, math
import multiprocessing
from numba import float64, float32, jit, boolean, vectorize, int64
from numba.types import UniTuple, Tuple, void

from bimodal_mixture_model import make_bimodal_params_numba, value_to_bimodal_prob_numba
from cluster_init import assign_cells_init, spectral_cluster_cosine, ward_cluster_cosine, spectral_cluster, boolean_spectral_cluster, nmf_init, kmeans_cluster
from likelihood import full_log_likelihood_single, greedy_optimize_i_numba_single, greedy_optimize_j_numba_single, sgn
import random
import pickle
from sklearn import model_selection


@jit(Tuple((float64[:], float64[:, :]))(float64[:, :], float64[:, :]),
     nopython=True)
def get_memo_params(
    i_cells: np.ndarray,
    j_genes: np.ndarray,
):
    z_count = np.dot(i_cells, j_genes.T)
    z = sgn(z_count)
    z_arr = np.sum(z, 0)
    return z_arr, z_count


def estimate_epi_data(i_cells: np.ndarray, j_genes: np.ndarray):
    z = np.dot(i_cells, j_genes.T).astype(bool).astype(float)
    z_total = np.sum(z, 0) * (1.0 / (i_cells.shape[0]))
    return z_total
Example #24
@generated_jit(cache=True)
@overload(operator.eq)
def var_eq(left_var, right_var):
    return comparator_helper("==", left_var, right_var)


@generated_jit(cache=True)
@overload(operator.ne)
def var_ne(left_var, right_var):
    return comparator_helper("==", left_var, right_var, True)


## dnf_type ##
pterm_list_type = ListType(PTermType)
ab_conjunct_type = Tuple((pterm_list_type, pterm_list_type))
dnf_type = ListType(ab_conjunct_type)

## distr_dnf_type ##
pterm_list_list_type = ListType(pterm_list_type)
distr_ab_conjunct_type = Tuple(
    (pterm_list_list_type, pterm_list_list_type, i8[:, :]))
distr_dnf_type = ListType(distr_ab_conjunct_type)

conditions_fields_dict = {
    ### Fields that are filled on in instantiation ###

    # The variables used by the condition
    'vars': ListType(GenericVarType),

    # The Disjunctive Normal Form of the condition but organized
Example #25
import numpy as np
import scipy.stats as st
import matplotlib
import matplotlib.pyplot as plt
import time
from numba import jit, void, f8, i8 
from numba.types import UniTuple, Tuple
import datetime
from pathlib import Path

"""
Time step functions 
"""

# calc dt for host dynamics, creating integer number of steps within dt of bacterial dynamics
@jit(Tuple((f8, i8))(f8, f8[::1], f8), nopython=True)
def calc_dynamic_timestep(hostProp, dtVec, dtBac):
    # calculate smallest time step ever needed
    dt = mlsg.calc_max_time_step(hostProp, dtVec)
    if dt == dtVec.min():
        print("warning: min dt reached")
    # make sure that there is an integer number of time steps
    numSubStep = int(np.ceil(dtBac / dt))
    dt = dtBac/numSubStep
    return (dt, numSubStep)

"""
Init functions 
"""
#initialize community  
def init_comm(model_par): 
Example #26
    if data_test is None:
        labels_pred = knn_model.predict(data, is_train=True)
        # error rate
        mask = labels_pred != labels
        err_rate = float(mask[mask].shape[0]) / labels.shape[0]
    else:
        labels_pred = knn_model.predict(data_test, is_train=False)
        # error rate
        mask = labels_pred != labels_test
        err_rate = float(mask[mask].shape[0]) / labels_test.shape[0]

    return err_rate


@njit(Tuple((float64[:], float64[:, :]))(int64[:, :], int64[:], int64), fastmath=True)
def neighbors_label_counts(index_neighbors, labels_train, n_classes):
    """
    Given the index of neighboring samples from the training set and the labels of the training samples,
    find the label counts among the k neighbors and assign the label corresponding to the highest count as the
    prediction.

    :param index_neighbors: numpy array of shape `(n, k)` with the index of `k` neighbors of `n` samples.
    :param labels_train: numpy array of shape `(m, )` with the class labels of the `m` training samples.
    :param n_classes: (int) number of distinct classes.

    :return:
        - labels_pred: numpy array of shape `(n, )` with the predicted labels of the `n` samples. Needs to be
                       converted to type `np.int` by the calling function. Numba is not very flexible.
        - counts: numpy array of shape `(n, n_classes)` with the count of each class among the `k` neighbors.
    """
Example #27
from numba import njit, int64, float64
from numba.typed import List as L
from numba.types import Tuple, List, ListType as LT
import numpy as np


#edges, vtx2vtx, vtx2edge, vtx2poly, edge2vtx, edge2edge, edge2poly, poly2vtx, poly2edge, poly2poly   , LT(LT(int64)),LT(LT(int64)), LT(LT(int64)), int64[:,::1], LT(LT(int64)), LT(LT(int64)), int64[:,::1], int64[:,::1], int64[:,::1]
@njit(Tuple(
    (int64[:, ::1], LT(LT(int64)), LT(LT(int64)), LT(LT(int64)), int64[:, ::1],
     LT(LT(int64)), LT(LT(int64)), int64[:, ::1], int64[:, ::1],
     int64[:, ::1]))(int64, int64[:, ::1]),
      cache=True)
def get_connectivity_info_surface(num_vertices, polys):

    vtx2vtx = L()
    vtx2edge = L()
    vtx2poly = L()
    edge2edge = L()
    edge2poly = L()

    for i in range(num_vertices):
        tmp1 = L()
        tmp2 = L()
        tmp3 = L()
        tmp1.append(-1)
        tmp2.append(-1)
        tmp3.append(-1)
        vtx2vtx.append(tmp1)
        vtx2edge.append(tmp2)
        vtx2poly.append(tmp3)
Example #28
    tree_edges_mask[incident_edge_index] = True

_construct_tree_edges_mask = utils.make_traverse_graph_via_bfs(_add_edge_to_tree, boolean[:])

@jit(boolean[:](int32, planar_graph_nb_type), nopython=True)
def construct_bfs_tree_edges_mask(root, graph):

    tree_edges_mask = utils.repeat_bool(False, graph.edges_count)

    used_vertex_flags = utils.repeat_bool(False, graph.size)

    _construct_tree_edges_mask(root, graph, used_vertex_flags, tree_edges_mask)

    return tree_edges_mask

@jit(Tuple((int32[:], int32[:]))(planar_graph_nb_type, int32[:], int32, boolean[:]), nopython=True)
def _get_ordered_bfs_subtree_adjacencies_and_incidence_indices(graph, bfs_levels,
        subtree_max_level, bfs_tree_edges_mask):

    bfs_subtree_edges_mask = bfs_tree_edges_mask

    for edge_index in range(graph.edges_count):

        edge_vertex1 = graph.edges.vertex1[edge_index]
        edge_vertex2 = graph.edges.vertex2[edge_index]

        if max(bfs_levels[edge_vertex1], bfs_levels[edge_vertex2]) > subtree_max_level:
            bfs_subtree_edges_mask[edge_index] = False

    ordered_bfs_subtree_adjacencies_list = []
    ordered_bfs_subtree_incidence_indices_list = []
Example #30
import numpy as np
from numba import njit, float64, int64
from numba.types import Tuple


@njit(Tuple((float64[:, ::1], int64[:, ::1]))(float64[:, ::1], int64[:, ::1]),
      cache=True)
def remove_duplicated_vertices(vertices, faces):

    vtx_dictionary = dict()
    support_set = set()
    vtx_dictionary[(-1., -1., -1.)] = -1.
    support_set.add((-1., -1., -1.))
    new_vertices = np.zeros(vertices.shape, dtype=np.float64)

    j = 0
    for i in range(vertices.shape[0]):

        v = (vertices[i][0], vertices[i][1], vertices[i][2])

        if v not in support_set:

            vtx_dictionary[v] = i
            support_set.add(v)
            new_vertices[j] = vertices[i]
            j += 1

        else:
            idx = vtx_dictionary[v]
            r = np.where(faces == i)
            for k in zip(r[0], r[1]):