def ema(
    y: 'np.ndarray[float64]',
    alpha: optional(float64) = None,
    ylast: optional(float64) = None,
) -> 'np.ndarray[float64]':
    r'''
    Exponential weighted moving average aka 'Exponential smoothing'.

    - https://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average
    - https://en.wikipedia.org/wiki/Exponential_smoothing

    Fun facts: a geometric progression is the discrete version of an
    exponential function, which is where the name for this smoothing method
    originated according to statistics lore. In signal processing parlance,
    an EMA is a first order IIR filter.

    .. math::

        S_t =
        \begin{cases}
            Y_1,                               & t = 1 \\
            \alpha Y_t + (1 - \alpha) S_{t-1}, & t > 1
        \end{cases}

    Or, in pseudocode::

        s[0] = y[0]
        s[t] = a*y[t] + (1 - a)*s[t-1],  t > 0

    More discussion here:
    https://stackoverflow.com/questions/42869495/numpy-version-of-exponential-weighted-moving-average-equivalent-to-pandas-ewm
    '''
    n = y.shape[0]

    if alpha is None:
        # https://en.wikipedia.org/wiki/Moving_average#Relationship_between_SMA_and_EMA
        # use the "center of mass" convention making an ema compare
        # directly to the com of a SMA or WMA:
        alpha = 2 / float(n + 1)

    s = np.empty(n, dtype=float64)

    if n == 1:
        if ylast is None:
            # no prior value to blend with; seed from the single input
            s[0] = y[0]
        else:
            s[0] = y[0] * alpha + ylast * (1 - alpha)
    else:
        if ylast is None:
            s[0] = y[0]
        else:
            s[0] = ylast

        for i in range(1, n):
            s[i] = y[i] * alpha + s[i - 1] * (1 - alpha)

    return s
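# A minimal reference check for the recurrence documented above (hypothetical,
# not part of the original module): a plain-NumPy implementation of
# s[t] = alpha*y[t] + (1 - alpha)*s[t-1] with the same "center of mass"
# default alpha, which the jitted ema() should agree with when called as
# ema(y, None, None).
import numpy as np

def ema_reference(y, alpha=None):
    if alpha is None:
        alpha = 2 / float(len(y) + 1)
    s = np.empty(len(y), dtype=np.float64)
    s[0] = y[0]
    for t in range(1, len(y)):
        s[t] = alpha * y[t] + (1 - alpha) * s[t - 1]
    return s

print(ema_reference(np.array([1.0, 2.0, 3.0, 4.0])))  # ~[1.0, 1.4, 2.04, 2.824]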
def generic_resolve(self, tpe, attr): if attr == "identities": if tpe.identitiestpe == numba.none: return numba.optional( identities.IdentitiesType(numba.int32[:, :])) else: return tpe.identitiestpe
def test_deferred_type(self): node_type = deferred_type() spec = OrderedDict() spec['data'] = float32 spec['next'] = optional(node_type) @njit def get_data(node): return node.data @jitclass(spec) class LinkedNode(object): def __init__(self, data, next): self.data = data self.next = next def get_next_data(self): # use deferred type as argument return get_data(self.next) def append_to_tail(self, other): cur = self while cur.next is not None: cur = cur.next cur.next = other node_type.define(LinkedNode.class_type.instance_type) first = LinkedNode(123, None) self.assertEqual(first.data, 123) self.assertIsNone(first.next) second = LinkedNode(321, first) first_meminfo = _get_meminfo(first) second_meminfo = _get_meminfo(second) self.assertEqual(first_meminfo.refcount, 3) self.assertEqual(second.next.data, first.data) self.assertEqual(first_meminfo.refcount, 3) self.assertEqual(second_meminfo.refcount, 2) # Test using deferred type as argument first_val = second.get_next_data() self.assertEqual(first_val, first.data) # Check setattr (issue #2606) self.assertIsNone(first.next) second.append_to_tail(LinkedNode(567, None)) self.assertIsNotNone(first.next) self.assertEqual(first.next.data, 567) self.assertIsNone(first.next.next) second.append_to_tail(LinkedNode(678, None)) self.assertIsNotNone(first.next.next) self.assertEqual(first.next.next.data, 678) # Check ownership self.assertEqual(first_meminfo.refcount, 3) del second, second_meminfo self.assertEqual(first_meminfo.refcount, 2)
def generic_resolve(self, tpe, attr): if attr == "index": return tpe.indextpe elif attr == "content": return tpe.contenttpe elif attr == "identities": if tpe.identitiestpe == numba.none: return numba.optional( identity.IdentitiesType(numba.int32[:, :])) else: return tpe.identitiestpe
def test_deferred_type(self): node_type = deferred_type() spec = OrderedDict() spec['data'] = float32 spec['next'] = optional(node_type) @njit def get_data(node): return node.data @jitclass(spec) class LinkedNode(object): def __init__(self, data, next): self.data = data self.next = next def get_next_data(self): # use deferred type as argument return get_data(self.next) node_type.define(LinkedNode.class_type.instance_type) first = LinkedNode(123, None) self.assertEqual(first.data, 123) self.assertIsNone(first.next) second = LinkedNode(321, first) first_meminfo = _get_meminfo(first) second_meminfo = _get_meminfo(second) self.assertEqual(first_meminfo.refcount, 3) self.assertEqual(second.next.data, first.data) self.assertEqual(first_meminfo.refcount, 3) self.assertEqual(second_meminfo.refcount, 2) # Test using deferred type as argument first_val = second.get_next_data() self.assertEqual(first_val, first.data) # Check ownership self.assertEqual(first_meminfo.refcount, 3) del second, second_meminfo self.assertEqual(first_meminfo.refcount, 2)
# ############# Random signal generation ############# # @njit def rand_choice_nb(arr, prob): """ https://github.com/numba/numba/issues/2539 :param arr: A 1D numpy array of values to sample from. :param prob: A 1D numpy array of probabilities for the given samples. :return: A random sample from the given array with a given probability. """ return arr[np.searchsorted(np.cumsum(prob), np.random.random(), side="right")] @njit(b1[:, :](b1[:, :], optional(i8)), cache=True) def shuffle_nb(a, seed=None): """Shuffle along first axis.""" if seed is not None: np.random.seed(seed) b = np.full_like(a, np.nan) for col in range(a.shape[1]): b[:, col] = np.random.permutation(a[:, col]) return b @njit def random_by_func_nb(shape, choice_func_nb, seed, *args): """Generate random signals based on function.""" if seed is not None: np.random.seed(seed)
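# Hypothetical usage sketch for rand_choice_nb above (assumes numpy is
# imported as np and the @njit functions above have been defined): draw
# weighted samples and check that the empirical frequencies roughly track
# the supplied probabilities.
values = np.array([1.0, 2.0, 3.0])
probs = np.array([0.2, 0.3, 0.5])  # must sum to 1
draws = np.array([rand_choice_nb(values, probs) for _ in range(10_000)])
print([round((draws == v).mean(), 2) for v in values])  # roughly [0.2, 0.3, 0.5]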
resizes the tree no matter what (no test is performed here). Parameters ---------- tree : TreeType The tree capacity : int The new desired capacity (maximum number of nodes it can contain) of the tree. """ tree.nodes = resize(tree.nodes, capacity) tree.y_pred = resize2d(tree.y_pred, capacity, zeros=True) tree.capacity = capacity @jit(void(TreeType, optional(uintp)), nopython=True, nogil=True) def resize_tree(tree, capacity=None): """Resizes and updates the tree to have the required capacity. By default, it doubles the current capacity of the tree if no capacity is specified. Parameters ---------- tree : TreeType The tree capacity : int or None The new desired capacity (maximum number of nodes it can contain) of the tree. If None, then it doubles the capacity of the tree. """ if capacity is None: if tree.capacity == 0:
``linkedlist.py``.  Here, we make a better interface in the Stack class that
encapsulates the underlying linked-list.
"""

from __future__ import print_function, absolute_import

from collections import OrderedDict

from numba import njit
from numba import deferred_type, intp, optional
from numba.core.runtime import rtsys
from numba.experimental import jitclass

linkednode_spec = OrderedDict()
linkednode_type = deferred_type()
linkednode_spec['data'] = data_type = deferred_type()
linkednode_spec['next'] = optional(linkednode_type)


@jitclass(linkednode_spec)
class LinkedNode(object):
    def __init__(self, data):
        self.data = data
        self.next = None


linkednode_type.define(LinkedNode.class_type.instance_type)

stack_spec = OrderedDict()
stack_spec['head'] = optional(linkednode_type)
stack_spec['size'] = intp
from numba import njit, float64, int64, optional
from numba.experimental import jitclass
import numpy as np

from .cluster import cluster
from .nodes import SumNode, ProdNode, Leaf, GaussianLeaf, MultinomialLeaf, fit_gaussian, fit_multinomial
from .utils import get_indep_clusters, isin


@jitclass([
    ('thr', float64),
    ('nclusters', int64),
    ('max_height', int64),
    ('ncat', int64[:]),
    ('classcol', optional(int64)),
    ('minstd', float64),
    ('smoothing', float64),
])
class LearnSPN:
    """ Learning method based on Gens and Domingos' LearnSPN.

        Attributes
        ----------
        thr: float
            p-value threshold for independence tests in product nodes.
        nclusters: int
            Number of clusters in sum nodes.
        max_height: int
            Maximum height (depth) of the network.
        ncat: numpy array
    x2 = (0.000002966 * rmax_nmi * rmax_nmi) - (0.000090532 * rmax_nmi) - 0.0010373287
    x3 = (-0.0000000592 * rmax_nmi * rmax_nmi) + (0.0000019826 * rmax_nmi) - 0.0000020198
    c = (9.7043566341 * math.log(rmax_nmi)) - 2.7295806689

    phi = ((x3 * ((r_nmi_use - r_phi_max)**3))
           + (x2 * ((r_nmi_use - r_phi_max)**2))
           + (x1 * (r_nmi_use - r_phi_max)) + c)

    if 130 < r_nmi < 360:
        # justification: NWS23, PDF page 287 (printed page 263)
        delta_phi = linear_interpolation(r_nmi, 130, 360, phi, (phi - 2))
        phi += delta_phi
    elif 360 <= r_nmi:
        phi -= 2

    return phi


@jit(signature_or_function=(numba.optional(numba.double), numba.double,
                            numba.double, numba.double, numba.int64,
                            numba.double, numba.double,
                            numba.optional(numba.double),
                            numba.optional(numba.double),
                            numba.optional(numba.double)),
     nopython=True, cache=True)
def calc_windspeed(cp_mb, r_nmi, lat_deg, fspeed_kts, rmax_nmi, angle_to_center, track_heading, pw_kpa,
    else:
        for i in (m, -m):
            yield L, i

#####################
# Basis set classes #
#####################

shell_type = deferred_type()

@jitclass([('L', int64), ('nprim', int64), ('ncont', int64),
           ('spherical', boolean), ('gaussian', boolean),
           ('alphas', float64[:]), ('_coef', float64[:]),
           ('rs', optional(int64[:])), ('ns', optional(int64[:]))])
class Shell(object):
    """The primary object used for all things basis set related.
    Due to limitations in numba, contraction coefficients are stored
    as a 1D-array and reshaped when calling contract methods.

    Args:
        coef (np.ndarray): 1D-array of contraction coefficients
        alphas (np.ndarray): 1D-array of primitive exponents
        nprim (int): number of primitives in the shell
        ncont (int): number of contracted functions in the shell
        L (int): angular momentum quantum number
        spherical (bool): whether angular momentum is expanded in a linearly
            independent set
        gaussian (bool): whether exponential dependence is r or r2
        rs (np.ndarray): 1D-array of radial exponents (default None)
        ns (np.ndarray): additional normalization factors (default None)
Here, we implement a binary tree and iterative preorder and inorder traversal
functions using a handwritten stack.
"""
from __future__ import print_function, absolute_import

import random
from collections import OrderedDict

from numba import njit
from numba import jitclass
from numba import int32, deferred_type, optional
from numba.runtime import rtsys

node_type = deferred_type()

spec = OrderedDict()
spec['data'] = int32
spec['left'] = optional(node_type)
spec['right'] = optional(node_type)


@jitclass(spec)
class TreeNode(object):
    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None


node_type.define(TreeNode.class_type.instance_type)

stack_type = deferred_type()
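# Hypothetical construction sketch for the TreeNode jitclass above (node_type
# has already been defined, so the optional left/right fields can hold other
# TreeNode instances, assigned here from interpreted code):
root = TreeNode(10)
root.left = TreeNode(5)
root.right = TreeNode(15)
print(root.data, root.left.data, root.right.data)  # 10 5 15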
from typing import AsyncIterator, Optional

import numpy as np
from numba import jit, float64, optional, int64

from ..data._normalize import iterticks


# TODO: things to figure the f**k out:
# - how to handle non-plottable values
# - composition of fsps / implicit chaining

@jit(
    float64[:](
        float64[:],
        optional(float64),
        optional(float64)
    ),
    nopython=True,
    nogil=True
)
def ema(
    y: 'np.ndarray[float64]',
    alpha: optional(float64) = None,
    ylast: optional(float64) = None,
) -> 'np.ndarray[float64]':
    r"""Exponential weighted moving average aka 'Exponential smoothing'.

    - https://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average
    - https://en.wikipedia.org/wiki/Exponential_smoothing
@njit( Tuple(types=( Tuple(types=(int64, int64)), int64, float64, ))( int64[::1], int64[::1], int64[::1], int64, int64, int64, int64, optional(int64[::1]), optional(int64[::1]), optional(int64[::1]), optional(int64[::1]), int64[:, ::1], float64[:, ::1], float64[:, ::1], float64[:, ::1], float64[:, :, ::1], optional(float64[:, :, ::1]), float64, optional(int64[:, :, ::1]), optional(uint8[:, :, ::1]), optional(int64[::1]), boolean, boolean,
from numba.experimental import jitclass
import numba as nb
import numpy as np

from .signed import (signed, signed_max, signed_min, signed_max_vec,
                     signed_min_vec, signed_prod, signed_sum, signed_sum_vec,
                     signed_econtaminate, signed_join)
from .utils import (bincount, logtrunc_phi, isin, isin_arr, lse, logsumexp2,
                    logsumexp3, nb_argmax, nb_argsort, categorical,
                    sample_trunc_phi)

node_type = deferred_type()

spec = OrderedDict()
spec['id'] = int64
spec['left_child'] = optional(node_type)  # first child
spec['right_child'] = optional(node_type)  # last child
spec['sibling'] = optional(node_type)  # next sibling
spec['nchildren'] = int64
spec['scope'] = int64[:]  # Indices of the variables in the support of the node's pdf
spec['type'] = types.unicode_type
spec['n'] = float64  # Number of datapoints
spec['w'] = optional(float64[:])  # Sum node weights
spec['logw'] = optional(float64[:])  # Log of sum node weights
spec['tempw'] = optional(float64[:])  # Temporary weights for conditional sampling
spec['comparison'] = int64  # Type of comparison (relevant for leaf nodes only)
spec['value'] = float64[:]  # Threshold for comparison (leaf nodes only)
spec['mean'] = float64  # Relevant for gaussian nodes only
from vectorbt.decorators import * from vectorbt.widgets import FigureWidget from vectorbt.timeseries import TimeSeries, _expanding_max_1d_nb, _pct_change_1d_nb, _ffill_1d_nb from numba.types.containers import UniTuple from numba import njit, f8, i8, b1, optional import numpy as np import pandas as pd import plotly.graph_objects as go __all__ = ['Signals'] # ############# Numba functions ############# # @njit(b1[:, :](UniTuple(i8, 2), i8, optional(i8), optional(i8)), cache=True) # 1.17 ms vs 56.4 ms for vectorized def generate_random_entries_nb(shape, n, every_nth, seed): """Randomly generate entry signals.""" if seed is not None: np.random.seed(seed) if every_nth is None: every_nth = 1 a = np.full(shape, False, dtype=b1) for i in range(shape[1]): idxs = np.random.choice(np.arange(shape[0])[::every_nth], size=n, replace=False) a[idxs, i] = True return a
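# Hypothetical usage sketch for generate_random_entries_nb above: 5 entry
# signals per column of a 100x3 boolean matrix, reproducible via the seed.
entries = generate_random_entries_nb((100, 3), 5, None, 42)
print(entries.shape, entries.sum(axis=0))  # (100, 3) [5 5 5]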
node.parent = None node = None def delete_tree(tree): delete(tree.root) node_type = deferred_type() @jitclass([ ('id', int64), # Unique (random) id ('counts', int64[:]), # Class counts of the data reaching the node. ('idx', int64[:]), # Indices of the samples reaching the node. ('split', optional(Split.class_type.instance_type)), # Split object ('parent', optional(node_type)), ('left_child', optional(node_type)), ('right_child', optional(node_type)), ('isleaf', optional(nb.boolean)), ('depth', int16), ]) class TreeNode: """ Class defining each node in a Decision Tree. """ def __init__(self, id, counts, parent, idx, isleaf): self.id = id self.counts = counts self.parent = parent self.idx = idx
'_header': tag.TagHeader, '_buffer': memoryview, })(bam.Tag) """Reference = numba.jitclass({ 'name' : str, 'length' : int, 'index' : int, '_optional' : dict, })(reference.Reference)""" PackedCIGAR = numba.jitclass({ 'buffer': memoryview, })(packed_cigar.PackedCIGAR) PackedSequence = numba.jitclass({ 'buffer': memoryview, '_length': int, })(packed_sequence.PackedSequence) Record = numba.jitclass({ '_header': bam.record.RecordHeader, '_name': numba.optional(bytearray), '_cigar': numba.optional(PackedCIGAR), '_sequence': numba.optional(PackedSequence), '_quality_scores': numba.optional(numba.char[:]), '_tags': numba.optional(Tag[:]), '_reference': numba.optional(reference.Reference), '_next_reference': numba.optional(reference.Reference), })(bam.Record)
    elif MERGE_DATA == DATA_MAX:
        _merge_indirect_max(contrib, data_in, data_out)
        # NOT equivalent to:
        # data_out[contrib] = np.maximum(data_in, data_out[contrib])
    elif MERGE_DATA == DATA_NANMAX:
        data_out[contrib] = np.fmax(data_in, data_out[contrib])
    else:
        raise ValueError("Invalid MERGE_DATA flag.")


from .flags import DATA_NANFIRST
__DEFAULT_MERGE_ACTION = DATA_NANFIRST


@numba.njit(
    (numba.optional(numba.float32[:, :]),
     numba.boolean[:],
     numba.optional(numba.float32[:, :]),
     numba.optional(numba.uint8[:])))
def merge_data_indirect(data_in, contrib, existing_data, MERGE_ACTION=None):
    """
    Arguments
    ---------
    data_in : [None] | 2d float32 matrix
        Input dataset
    contrib : 1d bool array
        The size of this array must match the lowest dimension shape of
        existing_data, if existing_data is not None. This indicates where
        values from data_in should be extracted. The number of True values
        in this array should match the number of rows in data_in. If data_in
        is None then this argument is ignored. It must still be passed in
        (cannot be None) due to typing restrictions. (a numba indexer cannot
        be optional and this
Here, we make a better interface in the Stack class that encapsulates the
underlying linked-list.
"""
from __future__ import print_function, absolute_import

from numba.utils import OrderedDict
from numba import njit
from numba import jitclass
from numba import deferred_type, intp, optional
from numba.runtime import rtsys

linkednode_spec = OrderedDict()
linkednode_type = deferred_type()
linkednode_spec['data'] = data_type = deferred_type()
linkednode_spec['next'] = optional(linkednode_type)


@jitclass(linkednode_spec)
class LinkedNode(object):
    def __init__(self, data):
        self.data = data
        self.next = None


linkednode_type.define(LinkedNode.class_type.instance_type)

stack_spec = OrderedDict()
stack_spec['head'] = optional(linkednode_type)
stack_spec['size'] = intp
node_type = deferred_type() spec = [ #('bounds', float64[:]), ('size', float64), #('points', float64[:,:]), #('masses', float64[:]), ('Npoints', int64), ('mass', float64), ('COM', float64[:]), ('center', float64[:]), ('IsLeaf', boolean), ('HasChild', boolean[:]), #('children', list) ('child0', optional(node_type)), ('child1', optional(node_type)), ('child2', optional(node_type)), ('child3', optional(node_type)), ('child4', optional(node_type)), ('child5', optional(node_type)), ('child6', optional(node_type)), ('child7', optional(node_type)) ] @jitclass(spec) class BHTree(object): def __init__(self, center, size): self.center = center
) def buffers_as_arrays(sa): buffers = sa.buffers() return ( _extract_isnull_bitmap(sa, 0, len(sa)), np.asarray(buffers[1]).view(np.uint32), np.asarray(buffers[2]).view(np.uint8), ) @numba.experimental.jitclass([ ("missing", numba.uint8[:]), ("offsets", numba.uint32[:]), ("data", numba.optional(numba.uint8[:])), ("offset", numba.int64), ]) class NumbaStringArray: """Wrapper around arrow's StringArray for use in numba functions. Usage:: NumbaStringArray.make(array) """ def __init__(self, missing, offsets, data, offset): self.missing = missing self.offsets = offsets self.data = data self.offset = offset
    Parameters
    ----------
    tree : TreeClassifier or TreeRegressor
        The tree to be resized

    capacity : int
        The new desired capacity (maximum number of nodes it can contain) of
        the tree
    """
    tree.nodes = resize(tree.nodes, capacity)
    tree.y_pred = resize(tree.y_pred, capacity, zeros=True)
    tree.capacity = capacity


@jit(
    [
        void(TreeClassifierType, optional(uintp)),
        void(TreeRegressorType, optional(uintp)),
    ],
    nopython=True,
    nogil=True,
)
def resize_tree(tree, capacity=None):
    """Resizes and updates the tree to have the required capacity if necessary.
    By default, it doubles the current capacity of the tree if no capacity is
    specified, and sets it to 3 if the tree is empty.

    Parameters
    ----------
    tree : TreeClassifier or TreeRegressor
        The tree to be resized
sqlite3_result_double.argtypes = c_void_p, c_double sqlite3_result_double.restype = None sqlite3_result_int64.argtypes = c_void_p, c_int64 sqlite3_result_int64.restype = None sqlite3_result_int.argtypes = c_void_p, c_int sqlite3_result_int.restype = None sqlite3_result_null.argtypes = c_void_p, sqlite3_result_null.restype = None RESULT_SETTERS = { optional(float64): sqlite3_result_double, optional(int64): sqlite3_result_int64, optional(int32): sqlite3_result_int, float64: sqlite3_result_double, int64: sqlite3_result_int64, int32: sqlite3_result_int, } value_methods = { 'blob': c_void_p, 'bytes': c_int, 'double': c_double, 'int': c_int, 'int64': c_int64, 'text': POINTER(c_ubyte),
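# Hypothetical lookup sketch: once a UDF's numba return type is known, the
# matching sqlite3_result_* setter is pulled from the table above (note that
# both the plain and the optional() flavour of a type map to the same C
# function).
setter = RESULT_SETTERS[optional(float64)]  # -> sqlite3_result_double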
"parent": intp, "depth": uintp, "is_left": boolean, "impurity": float32, "start_train": uintp, "end_train": uintp, "start_valid": uintp, "end_valid": uintp, "min_samples_split": uintp, "min_impurity_split": float32, "is_leaf": boolean, "bin": uint64, "feature": uintp, "found_split": boolean, "is_split_categorical": boolean, "bin_partition": optional(uint64[::1]), "bin_partition_size": uint64, "threshold": float32, "w_samples_valid": float32, "pos_train": uintp, "pos_valid": uintp, "aggregation": boolean, "step": float32, "node_count": intp, }, ) def grow( tree, tree_context, node_context, compute_node_context,
self.gain = gain self.feature_idx = feature_idx self.bin_idx = bin_idx self.gradient_left = gradient_left self.hessian_left = hessian_left self.gradient_right = gradient_right self.hessian_right = hessian_right self.n_samples_left = n_samples_left self.n_samples_right = n_samples_right @jitclass([ ('n_features', uint32), ('binned_features', uint8[::1, :]), ('n_bins', uint32), ('min_samples_leaf', optional(uint32)), ('min_gain_to_split', float32), ('all_gradients', float32[::1]), ('all_hessians', float32[::1]), ('ordered_gradients', float32[::1]), ('ordered_hessians', float32[::1]), ('sum_gradients', float32), ('sum_hessians', float32), ('constant_hessian', uint8), ('constant_hessian_value', float32), ('l2_regularization', float32), ('min_hessian_to_split', float32), ('partition', uint32[::1]), ('left_indices_buffer', uint32[::1]), ('right_indices_buffer', uint32[::1]), ])
@numba.experimental.jitclass([ ('origin', numba.float64[3::1]), ('direction', numba.float64[3::1]), ('inv_direction', numba.float64[3::1]), ('sign', numba.uint8[3::1]), ('color', numba.float64[3::1]), ('local_color', numba.float64[3::1]), ('i', numba.int32), ('j', numba.int32), ('bounces', numba.int32), ('p', numba.float64), ('local_p', numba.float64), ('G', numba.float64), ('prev', numba.optional(ray_type)), ('normal', numba.float64[3::1]), ('material', numba.int64), ('hit_light', numba.boolean), ]) class Ray: def __init__(self, origin, direction): # todo: I don't think any of these copies are necessary and i'd like to try removing them when otherwise stable self.origin = origin.copy() self.direction = direction.copy() self.inv_direction = 1 / self.direction self.sign = (self.inv_direction < 0).astype(np.uint8) self.color = WHITE.copy() self.local_color = WHITE.copy() self.i = 0 self.j = 0
a = lbs[ind] b = ubs[ind] _x = 2*(x-a)/(b-a) - 1.0 return _numba_chbevl(_x, cs[ind]) else: return np.nan return func ################################################################################ # This is slow for now :( leaf_type = numba.deferred_type() leaf_spec = OrderedDict() leaf_spec['a'] = numba.float64 leaf_spec['b'] = numba.float64 leaf_spec['ancestor'] = numba.optional(leaf_type) leaf_spec['m'] = numba.float64 leaf_spec['ind'] = numba.int64 leaf_spec['parent'] = numba.boolean leaf_spec['left'] = numba.optional(leaf_type) leaf_spec['right'] = numba.optional(leaf_type) @numba.jitclass(leaf_spec) class Leaf(object): def __init__(self, a, b, ancestor, ind): self.a = a self.b = b self.ancestor = ancestor self.m = (self.a + self.b)/2.0 self.ind = ind self.parent = False
This example demonstrates jitclasses and deferred types for writing a singly-linked-list. """ from __future__ import print_function, absolute_import from collections import OrderedDict import numpy as np from numba import njit from numba import jitclass from numba import int32, deferred_type, optional from numba.runtime import rtsys node_type = deferred_type() spec = OrderedDict() spec['data'] = int32 spec['next'] = optional(node_type) @jitclass(spec) class LinkedNode(object): def __init__(self, data, next): self.data = data self.next = next def prepend(self, data): return LinkedNode(data, self) @njit def make_linked_node(data): return LinkedNode(data, None)
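# Hypothetical usage sketch for the snippet above (assumes
# node_type.define(LinkedNode.class_type.instance_type) has been called, as in
# the other linked-list examples in this collection):
head = make_linked_node(3)
head = head.prepend(2)
head = head.prepend(1)
print(head.data, head.next.data, head.next.next.data)  # 1 2 3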
import sqlite3 import random import pytest from slumba import create_function, sqlite_udf from numba import int64, float64, optional @sqlite_udf(float64(float64)) def add_one(x): return x + 1.0 @sqlite_udf(optional(float64)(float64)) def add_one_optional(x): return x + 1.0 if x is not None else None @sqlite_udf(int64(int64, float64)) def add_each_other(x, y): return x + int(y) @pytest.fixture def con(): con = sqlite3.connect(':memory:') con.execute(""" CREATE TABLE t ( id INTEGER PRIMARY KEY, key VARCHAR(1),
"""Evacuation related functions""" import numba import numpy as np from numba.typing.typeof import typeof from crowddynamics.core.geom2D import line_intersect from crowddynamics.core.sensory_region import is_obstacle_between_points from crowddynamics.core.structures import obstacle_type_linear from crowddynamics.core.vector2D import length from numba import i8, f8, optional from crowddynamics.simulation.agents import NO_TARGET @numba.jit(f8(f8, f8, optional(f8), f8), nopython=True, nogil=True, cache=True) def narrow_exit_capacity(d_door, d_agent, d_layer=None, coeff=1.0): r""" Capacity estimation :math:`\beta` of unidirectional flow through narrow bottleneck. Capacity of the bottleneck increases in stepwise manner. Estimation 1 Simple estimation .. math:: \beta_{simple} = c \left \lfloor \frac{d_{door}}{d_{agent}} \right \rfloor Estimation 2 More sophisticated estimation [Hoogendoorn2005a]_, [Seyfried2007a]_ .. math:: \beta_{hoogen} = c \left \lfloor \frac{d_{door} - (d_{agent} - d_{layer})}{d_{layer}} \right \rfloor,\quad d_{door} \geq d_{agent}
# -*- coding:utf8 -*-
from numba import jit, optional, intp

'''
class numba.optional(typ)
    Creates an optional type based on the underlying Numba type ``typ``.
    The optional type allows any value of ``typ``, as well as None.
'''


@jit((optional(intp),))
def f(x):
    return x is not None


print(f(0))
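# A short extension of the example above: optional(intp) also admits None,
# so the same compiled signature handles both calls.
print(f(None))  # False -- None is accepted by optional(intp)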
import numba as nb import bampy.bgzf.zlib as zlib from bampy.bgzf.zlib import DEFAULT_COMPRESSION_LEVEL, Z_BEST_SPEED from bampy.mt import CACHE_JIT raw_decompress = nb.jit(nb.types.Tuple( (nb.intc, zlib.zState))(nb.optional(memoryview), nb.optional(memoryview), nb.intc, nb.optional(zlib.zState), nb.intc, nb.optional(bytes)), locals={ 'err': nb.intc, 'state': zlib.zState }, nopython=True, nogil=True, cache=CACHE_JIT)(zlib.raw_decompress) raw_compress = nb.jit(nb.types.Tuple( (nb.intc, zlib.zState))(nb.optional(memoryview), nb.optional(memoryview), nb.intc, nb.optional(zlib.zState), nb.intc, nb.intc, nb.intc, nb.optional(bytes)), locals={ 'err': nb.intc, 'state': zlib.zState }, nopython=True, nogil=True, cache=CACHE_JIT)(zlib.raw_compress)