Example 1
def clear_class(cells, nodes, faces):
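    """Reset the cells, faces and nodes containers to an empty state."""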

    cells.nodeid = []
    cells.volume = []
    cells.center = []
    cells.faceid = []
    cells.father = Dict()
    cells.son = Dict()
    cells.iadiv = Dict()
    cells.globalindex = OrderedDict()
    cells.cellfid = []
    cells.cellnid = []
    cells.nf = []

    faces.nodeid = []
    faces.name = []
    faces.cellid = []
    faces.normal = []
    faces.bound = 0
    faces.center = []
    faces.ghostcenter = []

    nodes.cellid = []
    nodes.name = []
    nodes.vertex = []
    nodes.globalindex = OrderedDict()
    nodes.halonid = []
    nodes.ghostcenter = []
Example 2
class Cells:
    nodeid = []
    faceid = []
    center = []
    volume = []
    cellfid = []
    nf = []  # outward normal of each face
    cellnid = []
    globalindex = OrderedDict()
    father = Dict()
    son = Dict()
    iadiv = Dict()

    def __init__(self, nodeid, faceid, center, volume, father, son, iadiv, globalindex, cellfid, cellnid,
                 nf):
        self.nodeid = nodeid    # instance variable unique to each instance
        self.faceid = faceid
        self.center = center
        self.volume = volume
        self.father = father
        self.son = son
        self.iadiv = iadiv
        self.globalindex = globalindex
        self.cellfid = cellfid
        self.cellnid = cellnid
        self.nf = nf
Example 3
 def foo(k1, k2, v):
     d = Dict()
     z1 = Dict()
     z1[k1 + 1] = v + k1
     z2 = Dict()
     z2[k2 + 2] = v + k2
     d[k1] = z1
     d[k2] = z2
     return d
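
Because Dict() starts untyped, d and the nested dictionaries above take their key/value types from the first insertion. A minimal sketch (not from the source; assumes int64 keys/values and a numba version that supports nested typed dicts) of the same structure built with explicit types:

from numba import types
from numba.typed import Dict

# Explicitly typed equivalent of foo's result for k1=1, k2=2, v=10.
inner_type = types.DictType(types.int64, types.int64)
d = Dict.empty(key_type=types.int64, value_type=inner_type)
z1 = Dict.empty(key_type=types.int64, value_type=types.int64)
z1[1 + 1] = 10 + 1  # z1[k1 + 1] = v + k1
d[1] = z1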
Example 4
def load_items(items):
    """
    Processes the Items loaded from the file extracting meta data around the vulnerability data.

    Args:
        items: (List[Item]) Data loaded from the vulnerability file

    Returns: (Tuple[Dict[int, int], Dict[int, int], np.ndarray, np.ndarray])
             vulnerability dictionary, areaperil ID to vulnerability index dictionary,
             areaperil ID to vulnerability index (start, end) array, areaperil ID to vulnerability array
    """
    areaperil_to_vulns_size = 0
    areaperil_dict = Dict()
    vuln_dict = Dict()
    vuln_idx = 0
    for i in range(items.shape[0]):
        item = items[i]

        # insert the vulnerability index if not in there
        if item['vulnerability_id'] not in vuln_dict:
            vuln_dict[item['vulnerability_id']] = np.int32(vuln_idx)
            vuln_idx += 1

        # insert an area dictionary into areaperil_dict under the key of areaperil ID
        if item['areaperil_id'] not in areaperil_dict:
            area_vuln = Dict()
            area_vuln[item['vulnerability_id']] = 0
            areaperil_dict[item['areaperil_id']] = area_vuln
            areaperil_to_vulns_size += 1
        else:
            if item['vulnerability_id'] not in areaperil_dict[item['areaperil_id']]:
                areaperil_to_vulns_size += 1
                areaperil_dict[item['areaperil_id']][item['vulnerability_id']] = 0

    areaperil_to_vulns_idx_dict = Dict()
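    # Index_type is assumed to be a structured dtype with 'start' and 'end' fields (used below).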
    areaperil_to_vulns_idx_array = np.empty(len(areaperil_dict),
                                            dtype=Index_type)
    areaperil_to_vulns = np.empty(areaperil_to_vulns_size, dtype=np.int32)

    areaperil_i = 0
    vulnerability_i = 0

    for areaperil_id, vulns in areaperil_dict.items():
        areaperil_to_vulns_idx_dict[areaperil_id] = areaperil_i
        areaperil_to_vulns_idx_array[areaperil_i]['start'] = vulnerability_i

        # sorting is not required, but it is cheap and matches the cpp getmodel ordering
        for vuln_id in sorted(vulns):
            areaperil_to_vulns[vulnerability_i] = vuln_id
            vulnerability_i += 1
        areaperil_to_vulns_idx_array[areaperil_i]['end'] = vulnerability_i
        areaperil_i += 1

    return vuln_dict, areaperil_to_vulns_idx_dict, areaperil_to_vulns_idx_array, areaperil_to_vulns
Example 5
    def test_equals_on_list_with_dict_for_unequal_dicts(self):
        # https://github.com/numba/numba/issues/4879
        a, b = List(), Dict()
        b["a"] = 1
        a.append(b)

        c, d = List(), Dict()
        d["a"] = 2
        c.append(d)

        self.assertNotEqual(a, c)
Example 6
 def __init__(self, map_path, cost_weights, waypoints, directory, is_ego):
     self.limp_s = 0.
     self.is_limping = False
     self.is_ego = is_ego
     self.prev_traj = None
     self.prev_param = None
     self.prev_steer = 0.
     self.cost_weights = cost_weights
     self.waypoints = waypoints
     self.wheelbase = 0.3302
     self.max_reacquire = 10
     self.safe_speed = 2.5
     self.CORNER_ON = False
     self.track_lad = 1.0
     self.STEER_LP = 0.99
     self.CURVATURE_THRESH = 20.
     self.WINDOW_SIZE = 3.
     self.TOP_POP_NUM = 3
     lut_all = np.load(directory + 'mpc/lut_inuse.npz')
     self.lut_x = lut_all['x']
     self.lut_y = lut_all['y']
     self.lut_theta = lut_all['theta']
     self.lut_kappa = lut_all['kappa']
     self.lut = lut_all['lut']
     step_sizes = []
     step_sizes.append(self.lut_x[1] - self.lut_x[0])
     step_sizes.append(self.lut_y[1] - self.lut_y[0])
     step_sizes.append(self.lut_theta[1] - self.lut_theta[0])
     step_sizes.append(self.lut_kappa[1] - self.lut_kappa[0])
     self.lut_stepsizes = np.array(step_sizes)
     with open(directory + 'config.yaml', 'r') as yaml_stream:
         try:
             config = yaml.safe_load(yaml_stream)
             speed_lut_name = config['speed_lut_name']
             range_lut_name = config['range_lut_name']
         except yaml.YAMLError as ex:
             print(ex)
             raise  # the LUT names are required below, so fail fast instead of continuing
     with open(directory + speed_lut_name, 'rb') as speed_file:
         speed_lut_temp = msgpack.unpack(speed_file, use_list=False)
     self.speed_lut_numba = Dict()
     for key, val in speed_lut_temp.items():
         if key == b'resolution':
             continue
         self.speed_lut_numba[key] = val
     with open(directory + range_lut_name, 'rb') as range_file:
         range_lut_temp = msgpack.unpack(range_file, use_list=False)
     self.range_lut_numba = Dict()
     for key, val in range_lut_temp.items():
         if key == b'resolution':
             continue
         self.range_lut_numba[key] = val
     self.lut_resolution = float(speed_lut_temp[b'resolution'][0])
Example 7
def enumerized_to_vectorized_legacy(enumerized_states, nominal_maps,
                                    number_backmap):
    '''
    enumerized_states : List<Dict<i8[:]>>
    '''
    elm_present = Dict()
    nominals = Dict()
    continuous = Dict()
    n_states = len(enumerized_states)
    for k, state in enumerate(enumerized_states):
        for typ, elms in state.items():
            nominal_map = nominal_maps[typ]
            for name, elm in elms.items():
                if (name not in elm_present):
                    elm_present[name] = np.zeros((n_states, ), dtype=np.uint8)
                elm_present[name][k] = True

                for i, attr in enumerate(elm):
                    if (nominal_map[i]):
                        tn = (name, i, attr)
                        if (tn not in nominals):
                            n_arr = np.zeros((n_states, ), dtype=np.uint8)
                            # n_arr.fill(255)
                            nominals[tn] = n_arr
                        nominals[tn][k] = True
                    else:
                        tc = (name, i)
                        if (tc not in continuous):
                            c_arr = np.empty((n_states, ), dtype=np.float64)
                            c_arr.fill(np.nan)
                            continuous[tc] = c_arr
                        continuous[tc][k] = number_backmap[attr]

    # Apparently filling it transposed and then transposing gives a fortran ordered array
    vect_nominals = np.empty((n_states, len(nominals)),
                             dtype=np.uint8)  #, order='F')
    vect_continuous = np.empty((n_states, len(continuous)),
                               dtype=np.float64)  #, order='F')

    for i, (tup, n_arr) in enumerate(nominals.items()):
        name, _, _ = tup
        # print(elm_present[name], np.where(elm_present[name], n_arr, 255) )
        vect_nominals[:, i] = np.where(elm_present[name], n_arr, 255)

    for i, c_arr in enumerate(continuous.values()):
        vect_continuous[:, i] = c_arr

    return vect_nominals, vect_continuous
Example 8
    def impl(data, wavelet, mode="symmetric", axis=None):
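        # have_axis is assumed to be captured from the enclosing factory/overload scope.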
        if not have_axis:
            axis = List(range(data.ndim))

        paxis = promote_axis(axis, data.ndim)
        naxis = len(paxis)
        pmode = promote_mode(mode, naxis)
        pwavelets = [discrete_wavelet(w) for w
                     in promote_wavelets(wavelet, naxis)]

        coeffs = List([("", data)])

        for a, (ax, m, wv) in enumerate(zip(paxis, pmode, pwavelets)):
            new_coeffs = List()

            for subband, x in coeffs:
                ca, cd = dwt_axis(x, wv, m, ax)
                new_coeffs.append((subband + "a", ca))
                new_coeffs.append((subband + "d", cd))

            coeffs = new_coeffs

        dict_coeffs = Dict()

        for name, coeff in coeffs:
            dict_coeffs[name] = coeff

        return dict_coeffs
Example 9
def group_index(index, group_by, return_dict=False, nb_compatible=False, assert_sorted=False):
    """Group index by some mapper.

    By default, returns an array of group indices pointing to the original index, and the new index.
    Set `return_dict` to `True` to return a dict instead of an array.
    Set `nb_compatible` to `True` to make the dict Numba-compatible (a typed Dict of arrays).
    Set `assert_sorted` to `True` to verify that group indices are increasing.
    """
    group_by = group_by_to_index(index, group_by)
    group_arr, new_index = pd.factorize(group_by)
    if not isinstance(new_index, pd.Index):
        new_index = pd.Index(new_index)
    if isinstance(group_by, pd.MultiIndex):
        new_index.names = group_by.names
    elif isinstance(group_by, (pd.Index, pd.Series)):
        new_index.name = group_by.name
    if assert_sorted:
        if not is_sorted(group_arr):
            raise ValueError("Group indices are not increasing. Use .sort_values() on the index.")
    if return_dict:
        groups = dict()
        for i, idx in enumerate(group_arr):
            if idx not in groups:
                groups[idx] = []
            groups[idx].append(i)
        if nb_compatible:
            numba_groups = Dict()
            for k, v in groups.items():
                numba_groups[k] = np.array(v)
            return numba_groups, new_index
        return groups, new_index
    return group_arr, new_index
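
A hypothetical usage sketch (names invented, not from the source), grouping a small index and requesting the Numba-compatible dict of arrays:

import pandas as pd

index = pd.Index(['x', 'y', 'x', 'y'])
groups, new_index = group_index(index, group_by=index, return_dict=True, nb_compatible=True)
# groups is a numba typed Dict mapping group number -> array of positions,
# e.g. groups[0] == array([0, 2]) for the 'x' group.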
Example 10
 def test_getitem(self):
     # Test __getitem__
     d = Dict()
     d[1] = 2
     # It's typed now
     self.assertTrue(d._typed)
     self.assertEqual(d[1], 2)
Example 11
    def groupby_apply(self, by, apply_func_nb, *args, on_matrix=False, **kwargs):
        """See `vectorbt.generic.nb.groupby_apply_nb` and
        `vectorbt.generic.nb.groupby_apply_matrix_nb` for `on_matrix=True`.

        For `by`, see `pd.DataFrame.groupby`.

        Example:
            ```python-repl
            >>> mean_nb = njit(lambda col, i, a: np.nanmean(a))
            >>> df.vbt.groupby_apply([1, 1, 2, 2, 3], mean_nb)
                 a    b    c
            1  1.5  4.5  1.5
            2  3.5  2.5  2.5
            3  5.0  1.0  1.0
            >>> mean_matrix_nb = njit(lambda i, a: np.nanmean(a))
            >>> df.vbt.groupby_apply([1, 1, 2, 2, 3], mean_matrix_nb, on_matrix=True)
                      a         b         c
            1  2.500000  2.500000  2.500000
            2  2.833333  2.833333  2.833333
            3  2.333333  2.333333  2.333333
            ```"""
        checks.assert_numba_func(apply_func_nb)

        regrouped = self._obj.groupby(by, axis=0, **kwargs)
        groups = Dict()
        for i, (k, v) in enumerate(regrouped.indices.items()):
            groups[i] = np.asarray(v)
        if on_matrix:
            result = nb.groupby_apply_matrix_nb(self.to_2d_array(), groups, apply_func_nb, *args)
        else:
            result = nb.groupby_apply_nb(self.to_2d_array(), groups, apply_func_nb, *args)
        return self.wrap_reduced(result, index=list(regrouped.indices.keys()))
Example 12
 def test_setdefault(self):
     # Test setdefault(k, d)
     d = Dict()
     d.setdefault(1, 2)
     # It's typed now
     self.assertTrue(d._typed)
     self.assertEqual(d[1], 2)
Example 13
def _MCS(node: int, A: np.ndarray, out: np.ndarray) -> np.ndarray:
    i = 1
    n = A.shape[0]
    while i < n:
        if i == 1:
            numbering = np.array([node])
            X = np.arange(n)
            # Caching neighbors
            neighbors = Dict()
            for j in range(n):
                neighbors[X[j]] = _neighbors(X[j], A)
        i += 1
        X = difference(X, numbering)
        x = X.shape[0]
        vmax = -1
        pmax = -1
        for j in range(x):
            k = len(intersection(neighbors[X[j]], numbering))
            if vmax < k:
                vmax = k
                pmax = j
        numbering = np.append(numbering, [X[pmax]])
        nodes = intersection(neighbors[X[pmax]], numbering[:i])
        if _add_missing_edges(nodes, A, out):
            # New edges were added, so restart the search.
            i = 1
    # Return the in-place updated matrix, matching the declared return annotation.
    return out
Example 14
    def groupby_apply(self, by, apply_func_nb, *args, on_matrix=False, **kwargs):
        """See `vectorbt.timeseries.nb.groupby_apply_nb` and 
        `vectorbt.timeseries.nb.groupby_apply_matrix_nb` for `on_matrix=True`.

        For `by`, see [pandas.DataFrame.groupby](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.groupby.html).

        Example:
            ```python-repl
            >>> mean_nb = njit(lambda col, i, a: np.nanmean(a))
            >>> print(df.vbt.timeseries.groupby_apply([1, 1, 2, 2, 3], 
            ...     mean_nb))
                 a    b    c
            1  1.5  4.5  1.5
            2  3.5  2.5  2.5
            3  5.0  1.0  1.0

            >>> mean_matrix_nb = njit(lambda i, a: np.nanmean(a))
            >>> print(df.vbt.timeseries.groupby_apply([1, 1, 2, 2, 3], 
            ...     mean_matrix_nb, on_matrix=True))
                      a         b         c
            1  2.500000  2.500000  2.500000
            2  2.833333  2.833333  2.833333
            3  2.333333  2.333333  2.333333
            ```"""
        checks.assert_numba_func(apply_func_nb)

        regrouped = self._obj.groupby(by, axis=0, **kwargs)
        groups = Dict()
        for i, (k, v) in enumerate(regrouped.indices.items()):
            groups[i] = np.asarray(v)
        if on_matrix:
            result = nb.groupby_apply_matrix_nb(self.to_2d_array(), groups, apply_func_nb, *args)
        else:
            result = nb.groupby_apply_nb(self.to_2d_array(), groups, apply_func_nb, *args)
        return self.wrap_array(result, index=list(regrouped.indices.keys()))
Example 15
 def foo():
     l = List()
     d = Dict()
     d[0] = 0
     # d.keys() provides a DictKeysIterableType
     l.extend(d.keys())
     return l
Example 16
        def foo(k, v):
            d = Dict()
            if k:
                d[k] = v
            else:
                d[0xDEAD] = v + 1

            return d
Example 17
        def foo(define):
            d = Dict()
            ct = len(d)
            for k, v in d.items():
                ct += v

            if define:
                # This will set the type
                d[1] = 2
            return ct, d, len(d)
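
A short usage note (plain Python, not jitted): the Dict only becomes typed once the define branch inserts a value, so foo(True) returns (0, {1: 2}, 1), while with foo(False) the returned Dict stays empty and untyped and the final len(d) is 0.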
Example 18
def FF_corr(facet_dict, skip_scatter = False):
    """Facet-Facet correlation function.

    Parameters
    ----------
    facet_dict : dict
        Dictionary mapping each node to its list of facets.
    skip_scatter : bool, optional
        If True, skip the facet-facet size correlation scatter plot, by default False.

    Returns
    ----------
    corr : float
        The facet-facet correlation number.
        If skip_scatter is True, only corr is returned.
    ff_pair : dict
        Dictionary whose keys are facet-size pairs and whose values are their degeneracies.

    """
    ff_corr = {}
    ff_pair = Dict()
    ff_pair[1, 1] = 1   # dummy insertion to fix the key/value types of the numba typed Dict
    del ff_pair[1, 1]   # remove the dummy entry
    tot_corr = 0
    tot_N = 0
    for node in tqdm(facet_dict):
        fs = List()
        for facet in facet_dict[node]:
            fs.append(len(facet) - 1)  # gather all facet sizes of the target node

        if skip_scatter:
            tot_corr += _FFcor(fs)
            tot_N += (len(fs)-1)*(len(fs))/2
            continue
        else:
            corr = _FFcor(fs, ff_pair)
            ff_corr[node] = corr

    if skip_scatter:
        return tot_corr/tot_N

    pairs = []
    weight = []
    for pair in ff_pair:
        pairs.append(pair)
        weight.append(ff_pair[pair])
    pairs = np.array(pairs)
    corr = 0
    for node in ff_corr:
        corr += ff_corr[node]
    corr /= sum(weight)
    plt.scatter(*pairs.T, s=np.log10(weight) + 1, label=f'corr. = {corr}')
    plt.legend()
    return corr, ff_pair
Example 19
def count_ngrams(string_list, n):
    # string_list: a typed List of str; n: the n-gram length.
    all_counts = List()
    for s in string_list:
        padded = "$" * (n - 1) + s + "$" * (n - 1)
        counts = Dict()
        for i in range(len(padded) - n + 1):
            k = padded[i : i + n]
            if k in counts:
                counts[k] += 1
            else:
                counts[k] = 1
        all_counts.append(counts)
    return all_counts
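
A minimal usage sketch (names invented; run as plain Python with typed containers):

from numba.typed import List

strings = List(["banana", "bandana"])
bigram_counts = count_ngrams(strings, 2)  # one Dict of n-gram -> count per input string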
Example 20
    def resample_apply(self,
                       freq,
                       apply_func_nb,
                       *args,
                       on_matrix=False,
                       **kwargs):
        """See `vectorbt.timeseries.nb.groupby_apply_nb` and 
        `vectorbt.timeseries.nb.groupby_apply_matrix_nb` for `on_matrix=True`.

        For `freq`, see `pandas.DataFrame.resample`.

        Example:
            ```python-repl
            >>> mean_nb = njit(lambda col, i, a: np.nanmean(a))
            >>> print(df.vbt.timeseries.resample_apply('2d', mean_nb))
                          a    b    c
            2018-01-01  1.5  4.5  1.5
            2018-01-03  3.5  2.5  2.5
            2018-01-05  5.0  1.0  1.0

            >>> mean_matrix_nb = njit(lambda i, a: np.nanmean(a))
            >>> print(df.vbt.timeseries.resample_apply('2d', 
            ...     mean_matrix_nb, on_matrix=True))
                               a         b         c
            2018-01-01  2.500000  2.500000  2.500000
            2018-01-03  2.833333  2.833333  2.833333
            2018-01-05  2.333333  2.333333  2.333333
            ```"""
        checks.assert_numba_func(apply_func_nb)

        resampled = self._obj.resample(freq, axis=0, **kwargs)
        groups = Dict()
        for i, (k, v) in enumerate(resampled.indices.items()):
            groups[i] = np.asarray(v)
        if on_matrix:
            result = nb.groupby_apply_matrix_nb(self.to_2d_array(), groups,
                                                apply_func_nb, *args)
        else:
            result = nb.groupby_apply_nb(self.to_2d_array(), groups,
                                         apply_func_nb, *args)
        result_obj = self.wrap_array(result, index=list(resampled.indices.keys()))
        resampled_arr = np.full((resampled.ngroups, self.to_2d_array().shape[1]), np.nan)
        resampled_obj = self.wrap_array(
            resampled_arr,
            index=pd.Index(list(resampled.groups.keys()), freq=freq))
        resampled_obj.loc[result_obj.index] = result_obj.values
        return resampled_obj
Example 21
def compute_criterion(w, cells, faces):

    grad = compute_gradient(w, cells.nodeid, faces.cellid, faces.name,
                            faces.normal, cells.volume)
    maxu = max(grad)

    tol = 0.0025 * maxu
    criterion = Dict()  # sparse alternative to a dense [0] * len(cells.nodeid) list

    for i in range(len(cells.nodeid)):
        if grad[i] > tol:
            criterion[i] = 1

    return criterion
Example 22
 def test_storage_model_mismatch(self):
     # https://github.com/numba/numba/issues/4520
     # check for storage model mismatch in refcount ops generation
     dct = Dict()
     ref = [
         ("a", True, "a"),
         ("b", False, "b"),
         ("c", False, "c"),
     ]
     # populate
     for x in ref:
         dct[x] = x
     # test
     for i, x in enumerate(ref):
         self.assertEqual(dct[x], x)
Example 23
    def _read_block_dense(
        self, block_idx, tile_block_size, min_per_file, max_per_file, fileset,
        slices, ranges, scheme_indices, shape_prods, out_decoded, r_n_d,
        sig_dims, ds_shape, need_clear, native_dtype, corrections,
    ):
        """
        Reads a block of tiles, starting at `block_idx`, having a size of
        `tile_block_size` read range entries.
        """
        # phase 1: read
        buffers = Dict()
        for fileno in min_per_file.keys():
            fh = fileset[fileno]
            read_size = max_per_file[fileno] - min_per_file[fileno]
            # FIXME: re-use buffers
            buffers[fileno] = np.zeros(read_size, dtype=np.uint8)
            # FIXME: file header offset handling is a bit weird
            # FIXME: maybe file header offset should be folded into the read ranges instead?
            fh.seek(min_per_file[fileno] + fh._file_header)
            fh.readinto(buffers[fileno])

        # phase 2: decode tiles from the data that was read
        for idx in range(block_idx, block_idx + tile_block_size):
            origin = slices[idx, 0]
            shape = slices[idx, 1]
            tile_ranges = ranges[idx]
            scheme_idx = scheme_indices[idx]
            out_cut = out_decoded[:shape_prods[idx]].reshape((shape[0], -1))

            data = r_n_d(
                idx,
                buffers, sig_dims, tile_ranges,
                out_cut, native_dtype, do_zero=need_clear,
                origin=origin, shape=shape, ds_shape=ds_shape,
                offsets=min_per_file,
            )
            tile_slice = Slice(
                origin=origin,
                shape=Shape(shape, sig_dims=sig_dims)
            )
            data = data.reshape(shape)
            self.preprocess(data, tile_slice, corrections)
            yield DataTile(
                data,
                tile_slice=tile_slice,
                scheme_idx=scheme_idx,
            )
Example 24
def _perfect_numbering(node: int, A: np.ndarray) -> np.ndarray:
    # Perfect numbering using the Maximum Cardinality Search
    n = A.shape[0]
    neighbors = Dict()
    numbering = np.array([node])
    X = np.arange(n)
    for i in range(n):
        # Caching neighbors sets
        neighbors[X[i]] = _neighbors(X[i], A)
    for i in range(1, n):
        X = difference(X, numbering)
        x = X.shape[0]
        vmax = -1
        pmax = -1
        for j in range(x):
            k = len(intersection(neighbors[X[j]], numbering))
            if vmax < k:
                vmax = k
                pmax = j
        numbering = np.append(numbering, [X[pmax]])
    return numbering
Example 25
 def test_check_untyped_dict_ops(self):
     # Check operation on untyped dictionary
     d = Dict()
     self.assertFalse(d._typed)
     self.assertEqual(len(d), 0)
     self.assertEqual(str(d), str({}))
     self.assertEqual(list(iter(d)), [])
     # Test __getitem__
     with self.assertRaises(KeyError) as raises:
         d[1]
     self.assertEqual(str(raises.exception), str(KeyError(1)))
     # Test __delitem__
     with self.assertRaises(KeyError) as raises:
         del d[1]
     self.assertEqual(str(raises.exception), str(KeyError(1)))
     # Test .pop
     with self.assertRaises(KeyError) as raises:
         d.pop(1)
     self.assertEqual(str(raises.exception), str(KeyError(1)))
     # Test .pop with a default value
     self.assertIs(d.pop(1, None), None)
     # Test .get
     self.assertIs(d.get(1), None)
     # Test .popitem
     with self.assertRaises(KeyError) as raises:
         d.popitem()
     self.assertEqual(str(raises.exception),
                      str(KeyError('dictionary is empty')))
     # Test setdefault(k)
     with self.assertRaises(TypeError) as raises:
         d.setdefault(1)
     self.assertEqual(
         str(raises.exception),
         str(TypeError('invalid operation on untyped dictionary')),
     )
     # Test __contains__
     self.assertFalse(1 in d)
     # It's untyped
     self.assertFalse(d._typed)
Example 26
 def ParseNumbaIn(self, values_in):
     values_out = {}
     for key, val in values_in.items():
         if "_List" in key:
             key = key.replace("_List", "")
             if len(val) == 0:
                 dtype_str = str(val.dtype)
                 dtype = getattr(types, dtype_str)
                 values_out[key] = List.empty_list(dtype)
             else:
                 values_out[key] = List(val)
         elif "_Dict" in key:
             key = key.replace("_Dict", "")
             d = Dict()
             dtype_str = val.pop("dtype")
             dtype = getattr(np, dtype_str)
             for k, v in val.items():
                 d[k] = dtype(v)
             values_out[key] = d
         else:
             values_out[key] = val
     return values_out
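
A hypothetical input sketch (values invented) illustrating the convention the method assumes: keys suffixed _List become typed Lists, and keys suffixed _Dict become typed Dicts whose 'dtype' entry names the NumPy value type.

import numpy as np

values_in = {
    "ids_List": np.array([1, 2, 3], dtype=np.int64),
    "weights_Dict": {"dtype": "float64", "a": 1.0, "b": 2.0},
    "flag": True,
}
values_out = obj.ParseNumbaIn(values_in)  # obj: an instance of the enclosing class
# values_out == {"ids": typed List, "weights": typed Dict(str -> float64), "flag": True}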
Example 27
 def test_dict_create_no_jit_using_Dict(self):
     with override_config('DISABLE_JIT', True):
         with forbid_codegen():
             d = Dict()
             self.assertEqual(type(d), dict)
Example 28
 def foo(x):
     d = Dict()
     d[0] = x
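     # Bag is assumed to be a type (e.g. a jitclass) defined in the enclosing test module.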
     d[1] = Bag(101)
     return d
Example 29
 def foo():
     d = Dict()
     return d
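
The Dict returned here stays untyped until something is inserted. To create an empty dictionary that is already typed, numba's Dict.empty can be used instead; a minimal sketch:

from numba import types
from numba.typed import Dict

# Typed from the start: no insertion is needed to fix the key/value types.
d = Dict.empty(key_type=types.int64, value_type=types.float64)
d[0] = 0.5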