Example no. 1
    def test_dict_of_dict_int_keyval(self):
        def inner_numba_dict():
            d = Dict.empty(
                key_type=types.intp,
                value_type=types.intp,
            )
            return d

        d = Dict.empty(
            key_type=types.intp,
            value_type=types.DictType(types.intp, types.intp),
        )

        def usecase(d, make_inner_dict):
            for i in range(100):
                mid = make_inner_dict()
                for j in range(i + 1):
                    mid[j] = j * 10000
                d[i] = mid
            return d

        got = usecase(d, inner_numba_dict)
        expect = usecase({}, dict)

        self.assertIsInstance(expect, dict)

        self.assertEqual(dict(got), expect)

        # Delete items
        for where in [12, 3, 6, 8, 10]:
            del got[where]
            del expect[where]
            self.assertEqual(dict(got), expect)
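A condensed sketch of the nesting pattern above (illustrative, not part of the original test): the outer dict's value type is spelled as types.DictType(...), and every inner dict must be a typed dict of exactly that type.

from numba import types
from numba.typed import Dict

inner_type = types.DictType(types.intp, types.intp)
outer = Dict.empty(key_type=types.intp, value_type=inner_type)

inner = Dict.empty(key_type=types.intp, value_type=types.intp)
inner[0] = 100
outer[0] = inner  # OK: inner's type matches DictType(intp, intp)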
Example no. 2
 def test_copy_from_dict(self):
     expect = {k: float(v) for k, v in zip(range(10), range(10, 20))}
     nbd = Dict.empty(int32, float64)
     for k, v in expect.items():
         nbd[k] = v
     got = dict(nbd)
     self.assertEqual(got, expect)
Example no. 3
 def foo():
     d = Dict.empty(
         key_type=types.int32,
         value_type=types.unicode_type,
     )
     d[123] = "123"
     d[321] = "321"
     return d
Example no. 4
 def foo():
     d = Dict.empty(
         key_type=types.unicode_type,
         value_type=types.int32,
     )
     d["123"] = 123
     d["321"] = 321
     return d
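Examples no. 3 and no. 4 run in the interpreter; the same Dict.empty() call is also supported inside @njit-compiled code. A minimal sketch (names are illustrative):

from numba import njit, types
from numba.typed import Dict

@njit
def make_typed_dict():
    # Construct and fill a typed dict entirely in nopython mode.
    d = Dict.empty(
        key_type=types.unicode_type,
        value_type=types.int64,
    )
    d["answer"] = 42
    return d

d = make_typed_dict()
print(d["answer"])  # 42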
Example no. 5
    def assert_disallow_key(self, ty):
        msg = '{} as key is forbidded'.format(ty)
        self.assert_disallow(msg, lambda: Dict.empty(ty, types.intp))

        @njit
        def foo():
            Dict.empty(ty, types.intp)
        self.assert_disallow(msg, foo)
Example no. 6
    def assert_disallow_value(self, ty):
        msg = '{} as value is forbidded'.format(ty)
        self.assert_disallow(msg, lambda: Dict.empty(types.intp, ty))

        @njit
        def foo():
            Dict.empty(types.intp, ty)
        self.assert_disallow(msg, foo)
Example no. 7
 def foo():
     # Make dictionary
     d = Dict.empty(
         key_type=types.unicode_type,
         value_type=float_array,
     )
     # Fill the dictionary
     d["posx"] = np.arange(3).astype(np.float64)
     d["posy"] = np.arange(3, 6).astype(np.float64)
     return d
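The snippet references float_array, which is defined outside of it; in the corresponding Numba documentation example it is a 1-D float64 array type. A self-contained version under that assumption:

import numpy as np
from numba import types
from numba.typed import Dict

float_array = types.float64[:]  # assumption: 1-D float64 arrays as values

d = Dict.empty(
    key_type=types.unicode_type,
    value_type=float_array,
)
d["posx"] = np.arange(3).astype(np.float64)
d["posy"] = np.arange(3, 6).astype(np.float64)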
Example no. 8
    def test_delitem(self):
        d = Dict.empty(types.int64, types.unicode_type)
        d[1] = 'apple'

        @njit
        def foo(x, k):
            del x[k]

        foo(d, 1)
        self.assertEqual(len(d), 0)
        self.assertFalse(d)
Example no. 9
        def foo(count):
            d = Dict.empty(
                key_type=types.intp,
                value_type=inner_dict_ty,
            )
            for i in range(count):
                d[i] = inner_numba_dict()
                for j in range(i + 1):
                    d[i][j] = j

            return d
Example no. 10
def convert_to_typed_dict(G):

    edges_dict = Dict.empty(key_type=types.UniTuple(types.int64, 2),
                            value_type=types.float64)

    edges_dict.update({(edge[1], edge[0]): G.edges[edge]["weight"]
                       for edge in G.edges})
    edges_dict.update({(edge[0], edge[1]): G.edges[edge]["weight"]
                       for edge in G.edges})

    return edges_dict
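A hypothetical usage sketch for the function above, assuming G is a weighted undirected networkx graph:

import networkx as nx

G = nx.Graph()
G.add_edge(0, 1, weight=2.5)
G.add_edge(1, 2, weight=1.0)

edges_dict = convert_to_typed_dict(G)
# Both orientations of every edge are present:
print(edges_dict[(0, 1)], edges_dict[(1, 0)])  # 2.5 2.5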
Example no. 12
    def _assert_record_type(self, typ):
        if typ not in self.hist_structs:
            typ_cls = REGISTERED_TYPES[typ]

            # Type : (op_id, _hist, shape, arg_types, vmap)
            struct_typ = self.hist_structs[typ] = Tuple([
                i8, i8[::1], i8[::1], ListType(unicode_type),
                DictType(typ_cls, i8),
            ])
            self.hists[typ] = self.hists.get(typ, Dict.empty(i8, ListType(struct_typ)))
        return self.hist_structs[typ]
Example no. 14
    def _assert_declare_store(self, typ):
        struct_typ = self._assert_record_type(typ)
        typ_store = self.hists[typ]
        if 0 not in typ_store:
            typ_cls = REGISTERED_TYPES[typ]
            tsd = typ_store[0] = typ_store.get(0, List.empty_list(struct_typ))
            tl = List()
            tl.append(typ)
            vmap = Dict.empty(typ_cls, i8)
            # Type : (0 (i.e. no-op), _hist, shape, arg_types, vmap)
            tsd.append(tuple([0, np.empty((0,), dtype=np.int64),
                              np.empty((0,), dtype=np.int64), tl, vmap]))
Example no. 15
    def __init__(self, jsonfile):
        with open(jsonfile) as fin:
            goldenjson = json.load(fin)

        self._masks = Dict.empty(key_type=types.uint32,
                                 value_type=types.uint32[:])

        for run, lumilist in goldenjson.items():
            mask = np.array(lumilist, dtype=np.uint32).flatten()
            mask[::2] -= 1
            self._masks[np.uint32(run)] = mask
Example no. 16
def np_nerve_complex_from(data):
    """Get nerve complex

    Returns:
        [type]: Dictionary which has (node - List of simplex(List)) as a key - value pair.
    """
    k = [int(ks) for ks in data]
    k.sort(reverse=True)
    facets = Dict.empty(*Scomplex_type)
    simps = set()
    simplices = Dict.empty(*Scomplex_type)
    nsimplex = 0
    for i in tqdm(k):
        for j in data[i]:
            simp = tuple(sorted(j))
            simpset = List(simp)
            if simp in simps:  #overlap check
                continue
            simps.add(simp)
            simplices[nsimplex + 1] = List(simp)
            nsimplex += 1
            facet = True
            #nfacet  = False
            for node in simp:
                faces = facets.get(node, List.empty_list(int32))
                if not faces or facet:
                    break  # this hyperedge is facet
                else:
                    for face in faces:
                        if len(set(simpset) - set(simplices[face])
                               ) == 0:  #if there is larger than this one
                            facet = False  #(i.e. this is a one of the face of an existing facet.)
                        break
                if not facet:
                    break
            if facet:
                for node in simp:
                    faces = facets.get(node, List.empty_list(int32))
                    faces.append(nsimplex)
                    facets[node] = faces
    return facets, simplices
Example no. 17
    def _load_variables(self):

        self._rsps = Dict.empty(key_type=types.unicode_type,
                                value_type=types.float64[:, :])

        for key, value in self._detector_group.items():

            if key[0] == "z":

                match = re.match("^z0*(\d+)_az0*(\d+)$", key)
                z, az = map(str, match.groups())

                self._rsps["%s_%s" % (az, z)] = np.ascontiguousarray(value[()])

        self.at_scat_data = np.ascontiguousarray(
            self._detector_group["at_scat_data"][()].astype("<f4"))
        self.e_in = self._detector_group["e_in"][()]
        self.lat_edge = self._detector_group["lat_edge"][()].astype("<f4")
        self.theta_edge = self._detector_group["theta_edge"][()]
        self.phi_edge = self._detector_group["phi_edge"][()]
        self.lat_cent = self._detector_group["lat_cent"][()]
        self.theta_cent = self._detector_group["theta_cent"][()]
        self.phi_cent = self._detector_group["phi_cent"][()]
        self.double_phi_cent = self._detector_group["double_phi_cent"][()]

        self.X = self._detector_group["X"][()]
        self.Y = self._detector_group["Y"][()]
        self.Z = self._detector_group["Z"][()]

        self.Azimuth = self._detector_group["Azimuth"][()].astype("<f4")
        self.Zenith = self._detector_group["Zenith"][()].astype("<f4")
        self.milliaz = np.array(
            [str(x) for x in self._detector_group["milliaz"][()]])
        self.millizen = np.array(
            [str(x) for x in self._detector_group["millizen"][()]])
        self.LIST = self._detector_group["LIST"][()]
        self.LPTR = self._detector_group["LPTR"][()]
        self.LEND = self._detector_group["LEND"][()]

        self.epx_lo = self._detector_group["epx_lo"][()].astype("<f4")
        self.epx_hi = self._detector_group["epx_hi"][()].astype("<f4")

        self.ichan = self._detector_group["ichan"][()]
        self.ienerg = self._detector_group["ienerg"][()]
        self.energ_lo = self._detector_group["energ_lo"][()]
        self.energ_hi = self._detector_group["energ_hi"][()]
        self.grid_points_list = np.array([
            np.cos(np.deg2rad(90 - self.Zenith)) *
            np.cos(np.deg2rad(self.Azimuth)),
            np.cos(np.deg2rad(90 - self.Zenith)) *
            np.sin(np.deg2rad(self.Azimuth)),
            np.sin(np.deg2rad(90 - self.Zenith)),
        ]).T
Example no. 18
    def sdc_indexes_build_map_positions_impl(self):
        indexer_map = Dict.empty(indexer_dtype, indexer_value_type)
        for i in range(len(self)):
            val = self[i]
            index_list = indexer_map.get(val, None)
            if index_list is None:
                indexer_map[val] = List.empty_list(types.int64)
                indexer_map[val].append(i)
            else:
                index_list.append(i)

        return indexer_map
Example no. 19
    def __init__(
        self,
        path,
        input_units,
        nside,
        interpolation_kind="linear",
        has_polarization=True,
        map_dist=None,
        verbose=False,
    ):
        """PySM component interpolating between precomputed maps

        In order to save memory, maps are converted to float32, if this is not acceptable, please
        open an issue on the PySM repository.
        When you create the model, PySM checks the folder of the templates and stores a list of
        available frequencies. Once you call `get_emission`, maps are read, ud_graded to the target
        nside and stored for future use. This is useful if you are running many channels
        with a similar bandpass.
        If not, you can call `cached_maps.clear()` to remove the cached maps.

        Parameters
        ----------
        path : str
            Path should contain maps named as the frequency in GHz
            e.g. 20.fits or 20.5.fits or 00100.fits
        input_units : str
            Any unit available in PySM3 e.g. "uK_RJ", "uK_CMB"
        nside : int
            HEALPix NSIDE of the output maps
        interpolation_kind : string
            Currently only linear is implemented
        has_polarization : bool
            whether or not to simulate also polarization maps
        map_dist : pysm.MapDistribution
            Required for partial sky or MPI, see the PySM docs
        verbose : bool
            Control amount of output
        """

        super().__init__(nside=nside, map_dist=map_dist)
        self.maps = {}
        self.maps = self.get_filenames(path)

        # use a numba typed Dict so it can be used in JIT-compiled code
        self.cached_maps = Dict.empty(key_type=types.float32,
                                      value_type=types.float32[:, :])

        self.freqs = np.array(list(self.maps.keys()))
        self.freqs.sort()
        self.input_units = input_units
        self.has_polarization = has_polarization
        self.interpolation_kind = interpolation_kind
        self.verbose = verbose
Example no. 20
def get_areas(watershed_markers):
    """get the areas of the particles"""

    # dictionary mapping colors to their areas
    particle_areas = Dict.empty(
        key_type=types.int64,  # don't need int64 but compiler throws warnings otherwise
        value_type=types.float64)

    particle_areas = get_areas_helper(watershed_markers, particle_areas)

    return particle_areas
Example no. 21
 def blocked_doors(self):
     """
     All closed doors on the map
     :return:
     """
     blocked_doors = Dict.empty(key_type=types.UniTuple(int32, 2),
                                value_type=int32)
     for obj in self.list_of_objects:
         if obj.flag in {"door_h", "door_v"} and obj.blocked:
             i, j = mapping(obj.x, obj.y)
             blocked_doors[(i, j)] = 0
     return blocked_doors
Example no. 22
    def test_getitem_key(self):
        pyfunc = getitem_key
        cfunc = jit(nopython=True)(pyfunc)

        for x, i in [(np.array('123'), ()), (np.array(['123']), 0),
                     (np.array(b'123'), ()), (np.array([b'123']), 0)]:
            d1 = {}
            d2 = Dict.empty(from_dtype(x.dtype), types.int64)
            pyfunc(d1, x, i)
            cfunc(d2, x, i)
            self.assertEqual(d1, d2)
            # check for charseq to str conversion:
            str(d2)
Example no. 23
def _f(win, window1, window2):
    """
    Count class.
    Return $f = 1 - \frac{\sum_{i=1}^{p}{|a_{1i} -a_{2i}|}}{2w^{2}}$
    """
    unq1 = unique(window1)
    unq2 = unique(window2)

    cnt1 = Dict.empty(
        key_type=types.int64,
        value_type=types.int64,
    )
    cnt2 = Dict.empty(
        key_type=types.int64,
        value_type=types.int64,
    )

    for key in unq1:
        cnt1[key] = (window1 == key).sum()

    for key in unq2:
        cnt2[key] = (window2 == key).sum()

    A = set(cnt1.keys())
    B = set(cnt2.keys())
    common = list(A.intersection(B))
    only_in_A = list(A - B)
    only_in_B = list(B - A)

    aki = 0

    for cl in common:
        aki += abs(cnt1[cl] - cnt2[cl])
    for cl in only_in_A:
        aki += cnt1[cl]
    for cl in only_in_B:
        aki += cnt2[cl]
    f = 1 - aki / (2.0 * win * win)
    return f
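A quick sanity check of the formula, assuming _f is called as plain Python and that the snippet's unique is np.unique (its import is not shown):

import numpy as np

w1 = np.full((2, 2), 1)
w2 = np.full((2, 2), 9)

# Identical windows: aki = 0, hence f = 1.
assert _f(2, w1, w1) == 1.0
# Disjoint windows: aki = 2 * win**2, hence f = 0.
assert _f(2, w1, w2) == 0.0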
Example no. 24
    def get_continuation_values(self, period):
        """Get continuation values.

        The function takes the expected value functions from the previous periods and
        then uses the indices of child states to put these expected value functions in
        the correct format. If period is equal to self.n_periods - 1 the function
        returns arrays of zeros since we are in terminal states. Otherwise we retrieve
        expected value functions for next period and call
        :func:`_get_continuation_values` to assign continuation values to all choices
        within a period. (The object `subset_expected_value_functions` is required
        because we need a Numba typed dict but the function
        :meth:`StateSpace.get_attribute_from_period` just returns a normal dict)

        Returns
        -------
        continuation_values : numba.typed.Dict
            The continuation values for each dense key in a :class:`numpy.ndarray`.

        See also
        --------
        _get_continuation_values
            A more theoretical explanation can be found here: See :ref:`get continuation
            values <get_continuation_values>`.

        """
        if period == self.n_periods - 1:
            shapes = self.get_attribute_from_period("base_draws_sol", period)
            states = self.get_attribute_from_period("dense_key_to_core_indices", period)
            continuation_values = {
                key: np.zeros((states[key].shape[0], shapes[key].shape[1]))
                for key in shapes
            }
        else:
            child_indices = self.get_attribute_from_period("child_indices", period)
            expected_value_functions = self.get_attribute_from_period(
                "expected_value_functions", period + 1
            )
            subset_expected_value_functions = Dict.empty(
                key_type=nb.types.int64, value_type=nb.types.float64[:]
            )
            for key, value in expected_value_functions.items():
                subset_expected_value_functions[key] = value

            continuation_values = _get_continuation_values(
                self.get_attribute_from_period("dense_key_to_core_indices", period),
                self.get_attribute_from_period("dense_key_to_complex", period),
                child_indices,
                self.core_key_and_dense_index_to_dense_key,
                bypass={"expected_value_functions": subset_expected_value_functions},
            )
        return continuation_values
Example no. 25
    def __init__(self, jsonfile, numpy_lib, backend):
        with open(jsonfile) as fin:
            goldenjson = json.load(fin)
        self._masks = Dict.empty(key_type=types.int64,
                                 value_type=types.int64[:])

        self.backend = backend
        self.numpy_lib = numpy_lib

        for run, lumilist in goldenjson.items():
            run = int(run)
            mask = self.numpy_lib.array(lumilist).flatten()
            mask[::2] -= 1
            self._masks[int(run)] = mask
Example no. 26
 def test_false_pos(self):
     BE, akd_get, akd_includes, akd_insert = AKD(unicode_type)
     akd = Dict.empty(u4, BE)
     a = np.array([1, 2, 3, 4, 5], np.uint32)
     b = np.array([1, 2, 3, 4], np.uint32)
     c = np.array([5, 5, 3, 4], np.uint32)
     akd_insert(akd, a, "A")
     akd_insert(akd, b, "B")
     self.assertTrue(akd_includes(akd, a))
     self.assertTrue(akd_includes(akd, b))
     self.assertFalse(akd_includes(akd, c))
     self.assertEqual(akd_get(akd, a), "A")
     self.assertEqual(akd_get(akd, b), "B")
     self.assertEqual(akd_get(akd, c), None)
Example no. 27
def extra_edges(indices, indptr, last_level, seen, hops):
    edges = []
    mapping = Dict.empty(
        key_type=types.int64,
        value_type=types.int64,
    )
    for u in last_level:
        nbrs = indices[indptr[u]:indptr[u + 1]]
        nbrs = nbrs[seen[nbrs] == hops]
        mapping[u] = 1
        for v in nbrs:
            if not v in mapping:
                edges.append((u, v))
    return edges
Example no. 28
    def _load_variables(self, mask):

        self._rsps = Dict.empty(key_type=types.unicode_type,
                                value_type=types.float64[:, :])

        for key, value in self._detector_group.items():

            if key[0] == "z":

                match = re.match("^z0*(\d+)_az0*(\d+)$", key)
                z, az = map(str, match.groups())

                self._rsps["%s_%s" % (az, z)] = np.ascontiguousarray(
                    value[()][:, mask])
Example no. 29
    def _predict(self, X):
        """Predict class values of all instances in X.

        Parameters
        ----------
        X : 3D np.array of shape = [n_instances, n_dimensions, series_length]
            The data to make predictions for.

        Returns
        -------
        y : array-like, shape = [n_instances]
            Predicted class labels.
        """
        num_cases = X.shape[0]

        if self.n_dims_ > 1:
            if self.typed_dict:
                words = [
                    Dict.empty(key_type=types.UniTuple(types.int64, 2),
                               value_type=types.uint32)
                    for _ in range(num_cases)
                ]
            else:
                words = [defaultdict(int) for _ in range(num_cases)]

            for i, dim in enumerate(self._dims):
                X_dim = X[:, dim, :].reshape(num_cases, 1, self.series_length_)
                dim_words = self._transformers[i].transform(X_dim)
                dim_words = dim_words[0]

                for n in range(num_cases):
                    if self.typed_dict:
                        for word, count in dim_words[n].items():
                            if self.levels > 1:
                                words[n][(word[0],
                                          word[1] << self._highest_dim_bit
                                          | dim)] = count
                            else:
                                words[n][(word, dim)] = count
                    else:
                        for word, count in dim_words[n].items():
                            words[n][word << self._highest_dim_bit
                                     | dim] = count

            test_bags = words
        else:
            test_bags = self._transformers[0].transform(X)
            test_bags = test_bags[0]

        classes = Parallel(n_jobs=self._threads_to_use)(
            delayed(self._test_nn)(test_bag) for test_bag in test_bags)

        return np.array(classes)
Example no. 30
    def sdc_reindex_series_impl(arr, index, name, by_index):

        # no reindexing is needed if indexes are equal
        if range_indexes == True:  # noqa
            equal_indexes = numpy_like.array_equal(index, by_index)
        elif int64_indexes == True:  # noqa
            equal_indexes = numpy_like.array_equal(index, by_index)
        else:
            equal_indexes = False
        if (index is by_index or equal_indexes):
            return pandas.Series(data=arr, index=by_index, name=name)

        if data_is_str_arr == True:  # noqa
            _res_data = [''] * len(by_index)
            res_data_nan_mask = numpy.zeros(len(by_index), dtype=types.bool_)
        else:
            _res_data = numpy.empty(len(by_index), dtype=data_dtype)

        # build a dict of self.index values to their positions:
        map_index_to_position = Dict.empty(key_type=index_dtype,
                                           value_type=types.int32)

        for i, value in enumerate(index):
            if value in map_index_to_position:
                raise ValueError("cannot reindex from a duplicate axis")
            else:
                map_index_to_position[value] = i

        index_mismatch = 0
        for i in numba.prange(len(by_index)):
            val = by_index[i]
            if val in map_index_to_position:
                pos_in_self = map_index_to_position[val]
                _res_data[i] = arr[pos_in_self]
                if data_is_str_arr == True:  # noqa
                    res_data_nan_mask[i] = isna(arr, i)
            else:
                index_mismatch += 1
        if index_mismatch:
            msg = "Unalignable boolean Series provided as indexer " + \
                  "(index of the boolean Series and of the indexed object do not match)."
            raise IndexingError(msg)

        if data_is_str_arr == True:  # noqa
            res_data = create_str_arr_from_list(_res_data)
            str_arr_set_na_by_mask(res_data, res_data_nan_mask)
        else:
            res_data = _res_data

        return pandas.Series(data=res_data, index=by_index, name=name)
Example no. 31
    def __init__(self, target, width=30, verbose=1, interval=0.05,
                 stateful_metrics=None, dynamic_display=True):
        """
        Displays a pretty progress bar on screen. Inspired from keras.utils.Progbar

        :param target: The total number of steps expected, None if unknown.
        :param width: Progress bar width on screen.
        :param verbose: Verbosity: 0 (silent), 1 (verbose)
        :param interval: Minimum visual progress update interval (in seconds).
        :param stateful_metrics: '|'-separated string of metric names that should not be averaged over time.

        `>>> bar = ProgressBar(10, stateful_metrics='error|cpu_usage')`
        """
        self.target = -1 if target is None else target
        self.width = width
        self.verbose = verbose
        self.interval = interval

        self._metrics_sum = Dict.empty(
            key_type=types.unicode_type,
            value_type=types.float64
        )
        self._metrics_count = Dict.empty(
            key_type=types.unicode_type,
            value_type=types.int32
        )

        for metric_name in (stateful_metrics or '').split('|'):
            if metric_name != '':
                self._metrics_sum[metric_name] = 0
                self._metrics_count[metric_name] = -1

        self._dynamic_display = dynamic_display
        self._total_width = 0
        self._seen_so_far = 0
        self._start = get_seconds_since_epoch()
        self._last_update = 0
Example no. 32
def test_lumidata():
    from numba import types
    from numba.typed import Dict

    lumidata = LumiData("tests/samples/lumi_small.csv")

    # pickle & unpickle
    lumidata_pickle = cloudpickle.loads(cloudpickle.dumps(lumidata))

    # check same internal lumidata
    assert np.all(lumidata._lumidata == lumidata_pickle._lumidata)

    runslumis = np.zeros((10, 2), dtype=np.uint32)
    results = {"lumi": {}, "index": {}}
    for ld in lumidata, lumidata_pickle:
        runslumis[:, 0] = ld._lumidata[0:10, 0]
        runslumis[:, 1] = ld._lumidata[0:10, 1]
        lumi = ld.get_lumi(runslumis)
        results["lumi"][ld] = lumi
        diff = abs(lumi - 1.539941814)
        print("lumi:", lumi, "diff:", diff)
        assert diff < 1e-4

        # test build_lumi_table_kernel
        py_index = Dict.empty(
            key_type=types.Tuple([types.uint32, types.uint32]), value_type=types.float64
        )
        pyruns = ld._lumidata[:, 0].astype("u4")
        pylumis = ld._lumidata[:, 1].astype("u4")
        LumiData._build_lumi_table_kernel.py_func(
            pyruns, pylumis, ld._lumidata, py_index
        )

        assert len(py_index) == len(ld.index)

        # test get_lumi_kernel
        py_tot_lumi = np.zeros((1,), dtype=np.float64)
        LumiData._get_lumi_kernel.py_func(
            runslumis[:, 0], runslumis[:, 1], py_index, py_tot_lumi
        )

        assert abs(py_tot_lumi[0] - lumi) < 1e-4

        # store results:
        results["lumi"][ld] = lumi
        results["index"][ld] = ld.index

    assert np.all(results["lumi"][lumidata] == results["lumi"][lumidata_pickle])
    assert len(results["index"][lumidata]) == len(results["index"][lumidata_pickle])
Example no. 33
    def test_hash_conflict(self):
        BE, akd_get, akd_includes, akd_insert = AKD(unicode_type)
        akd = Dict.empty(u4, BE)
        a = np.array([1, 2, 3, 4, 5], np.uint32)
        b = np.array([1, 2, 3, 4], np.uint32)
        c = np.array([5, 5, 3, 4], np.uint32)
        akd_insert(akd, a, "C", 0)
        akd_insert(akd, b, "D", 0)
        self.assertTrue(akd_includes(akd, a, 0))
        self.assertEqual(akd_get(akd, a, 0), "C")
        self.assertTrue(akd_includes(akd, b, 0))
        self.assertEqual(akd_get(akd, b, 0), "D")

        self.assertFalse(akd_includes(akd, c, 0))
        self.assertEqual(akd_get(akd, c, 0), None)
Example no. 34
    def _shorten_bags(self, word_len):
        if self.save_words is False:
            raise ValueError(
                "Words from transform must be saved using save_word to shorten bags."
            )

        if word_len > self.word_length:
            word_len = self.word_length

        if self.typed_dict:
            warnings.simplefilter("ignore", category=NumbaTypeSafetyWarning)

        dim = Parallel(n_jobs=self.n_jobs)(
            delayed(self._shorten_case)(word_len, i) for i in range(len(self.words))
        )

        # can't pickle a typed dict
        if self.typed_dict and self.n_jobs != 1:
            nl = [None] * len(dim)
            for i, pdict in enumerate(dim):
                ndict = (
                    Dict.empty(
                        key_type=types.UniTuple(types.int64, 2), value_type=types.uint32
                    )
                    if self.levels > 1
                    else Dict.empty(key_type=types.int64, value_type=types.uint32)
                )
                for key, val in pdict.items():
                    ndict[key] = val
                nl[i] = ndict
            dim = nl

        new_bags = pd.DataFrame() if self.return_pandas_data_series else [None]
        new_bags[0] = list(dim)

        return new_bags
Example no. 35
 def __init__(self, lumi_csv):
     self._lumidata = np.loadtxt(
         lumi_csv,
         delimiter=',',
         usecols=(0, 1, 6, 7),
         converters={
             0: lambda s: s.split(b':')[0],
             1: lambda s: s.split(b':')[
                 0
             ],  # not sure what lumi:0 means, appears to be always zero (DAQ off before beam dump?)
         })
     self.index = Dict.empty(
         key_type=types.Tuple([types.uint32, types.uint32]),
         value_type=types.float64)
     self.build_lumi_table()
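Side note on the key type above: to my understanding, numba normalizes types.Tuple([...]) with identical members to a UniTuple, so the two spellings below name the same key type. A small sketch under that assumption:

from numba import types

k1 = types.Tuple([types.uint32, types.uint32])
k2 = types.UniTuple(types.uint32, 2)
assert k1 == k2  # assumption: homogeneous Tuple normalizes to UniTuple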
Example no. 36
def RemoveHoles(data, iz, iy, ix):
    # start timing statistics
    total_time = time.time()

    # read in the associated labels and the connected components
    read_time = time.time()
    components = ReadH5File('{}/components.h5'.format(
        data.TempBlockDirectory(iz, iy, ix)))
    # need to first create separate empty numba Dict
    associated_label_dict = Dict.empty(key_type=types.int64,
                                       value_type=types.int64)
    associated_label_dict.update(
        ReadPickledData('{}/hole-filling-associated-labels.pickle'.format(
            data.TempDirectory())))
    read_time = time.time() - read_time

    # remove all the holes with the associated labels dictionary
    hole_fill_time = time.time()
    components = AssignBackgroundAssociatedLabels(components,
                                                  associated_label_dict)
    hole_fill_time = time.time() - hole_fill_time

    # write the updated components to disk
    write_time = time.time()
    output_directory = data.HoleFillingOutputDirectory()
    output_filename = '{}/{:04d}z-{:04d}y-{:04d}x.h5'.format(
        output_directory, iz, iy, ix)
    WriteH5File(components, output_filename)
    write_time = time.time() - write_time

    total_time = time.time() - total_time

    print('Read Time: {:0.2f} seconds.'.format(read_time))
    print('Hole Fill Time: {:0.2f} seconds.'.format(hole_fill_time))
    print('Write Time: {:0.2f} seconds.'.format(write_time))
    print('Total Time: {:0.2f} seconds.'.format(total_time))

    # output timing statistics
    timing_directory = '{}/fill-holes'.format(data.TimingDirectory())
    if not os.path.exists(timing_directory):
        os.makedirs(timing_directory, exist_ok=True)
    timing_filename = '{}/{:04d}z-{:04d}y-{:04d}x.txt'.format(
        timing_directory, iz, iy, ix)
    with open(timing_filename, 'w') as fd:
        fd.write('Read Time: {:0.2f} seconds.\n'.format(read_time))
        fd.write('Hole Fill Time: {:0.2f} seconds.\n'.format(hole_fill_time))
        fd.write('Write Time: {:0.2f} seconds.\n'.format(write_time))
        fd.write('Total Time: {:0.2f} seconds.\n'.format(total_time))
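The pattern worth isolating from the example above: a typed dict can be seeded from a plain Python dict via update(). A minimal sketch:

from numba import types
from numba.typed import Dict

typed_map = Dict.empty(key_type=types.int64, value_type=types.int64)
typed_map.update({1: 10, 2: 20})  # copy entries from a plain dict
assert typed_map[1] == 10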
Example no. 37
 def test_basic(self):
     d = Dict.empty(int32, float32)
     # len
     self.assertEqual(len(d), 0)
     # setitems
     d[1] = 1
     d[2] = 2.3
     d[3] = 3.4
     self.assertEqual(len(d), 3)
     # keys
     self.assertEqual(list(d.keys()), [1, 2, 3])
     # values
     for x, y in zip(list(d.values()), [1, 2.3, 3.4]):
         self.assertAlmostEqual(x, y, places=4)
     # getitem
     self.assertAlmostEqual(d[1], 1)
     self.assertAlmostEqual(d[2], 2.3, places=4)
     self.assertAlmostEqual(d[3], 3.4, places=4)
      # delitem
     del d[2]
     self.assertEqual(len(d), 2)
     # get
     self.assertIsNone(d.get(2))
     # setdefault
     d.setdefault(2, 100)
     d.setdefault(3, 200)
     self.assertEqual(d[2], 100)
     self.assertAlmostEqual(d[3], 3.4, places=4)
     # update
     d.update({4: 5, 5: 6})
     self.assertAlmostEqual(d[4], 5)
     self.assertAlmostEqual(d[5], 6)
     # contains
     self.assertTrue(4 in d)
     # items
     pyd = dict(d.items())
     self.assertEqual(len(pyd), len(d))
     # pop
     self.assertAlmostEqual(d.pop(4), 5)
     # popitem
     nelem = len(d)
     k, v = d.popitem()
     self.assertEqual(len(d), nelem - 1)
     self.assertTrue(k not in d)
     # __eq__ & copy
     copied = d.copy()
     self.assertEqual(copied, d)
     self.assertEqual(list(copied.items()), list(d.items()))
Example no. 39
 def check_stringify(self, strfn, prefix=False):
     nbd = Dict.empty(int32, int32)
     d = {}
     nbd[1] = 2
     d[1] = 2
     checker = self.assertIn if prefix else self.assertEqual
     checker(strfn(d), strfn(nbd))
     nbd[2] = 3
     d[2] = 3
     checker(strfn(d), strfn(nbd))
     for i in range(10, 20):
         nbd[i] = i + 1
         d[i] = i + 1
     checker(strfn(d), strfn(nbd))
     if prefix:
         self.assertTrue(strfn(nbd).startswith('DictType'))
Example no. 41
def to_typed_dict_rule_tensor(untyped_d, dimension, pi=False):
    if dimension == 1:
        t = types.float64[:]
    elif dimension == 2:
        t = types.float64[:, :]
    elif dimension == 3:
        t = types.float64[:, :, :]
    else:
        raise ValueError("dimension must be 1, 2, or 3")
    typed_d = Dict.empty(key_type=types.int64, value_type=t)
    if pi:
        for nonterm, tensor in untyped_d.items():
            typed_d[nonterm] = tensor.astype(np.float64)
    else:
        for rule, tensor in untyped_d.items():
            assert (hash(rule) not in typed_d)
            typed_d[hash(rule)] = tensor.astype(np.float64)
    return typed_d
Example no. 42
    def test_getitem_return_type(self):
        # Dict.__getitem__ must return non-optional type.
        d = Dict.empty(types.int64, types.int64[:])
        d[1] = np.arange(10, dtype=np.int64)

        @njit
        def foo(d):
            d[1] += 100
            return d[1]

        foo(d)
        # Return type is an array, not optional
        retty = foo.nopython_signatures[0].return_type
        self.assertIsInstance(retty, types.Array)
        self.assertNotIsInstance(retty, types.Optional)
        # Value is correctly updated
        self.assertPreciseEqual(d[1], np.arange(10, dtype=np.int64) + 100)
Example no. 43
    def test_str_key_array_value(self):
        np.random.seed(123)
        d = Dict.empty(
            key_type=types.unicode_type,
            value_type=types.float64[:],
        )
        expect = []
        expect.append(np.random.random(10))
        d['mass'] = expect[-1]
        expect.append(np.random.random(20))
        d['velocity'] = expect[-1]
        for i in range(100):
            expect.append(np.random.random(i))
            d[str(i)] = expect[-1]
        self.assertEqual(len(d), len(expect))
        self.assertPreciseEqual(d['mass'], expect[0])
        self.assertPreciseEqual(d['velocity'], expect[1])
        # Ordering is kept
        for got, exp in zip(d.values(), expect):
            self.assertPreciseEqual(got, exp)

        # Try deleting
        self.assertTrue('mass' in d)
        self.assertTrue('velocity' in d)
        del d['mass']
        self.assertFalse('mass' in d)
        del d['velocity']
        self.assertFalse('velocity' in d)
        del expect[0:2]

        for i in range(90):
            k, v = d.popitem()
            w = expect.pop()
            self.assertPreciseEqual(v, w)

        # Trigger a resize
        expect.append(np.random.random(10))
        d["last"] = expect[-1]

        # Ordering is kept
        for got, exp in zip(d.values(), expect):
            self.assertPreciseEqual(got, exp)
Example no. 44
 def _create_dom_channel_lookup(self):
     if HAVE_NUMBA:
         from numba.typed import Dict
         from numba import types
         data = Dict.empty(
             key_type=types.i8, value_type=types.float64[:, :]
         )
     else:
         data = {}
     for pmt in self.detector.pmts:
         if pmt.dom_id not in data:
             data[pmt.dom_id] = np.zeros((31, 9))
         data[pmt.dom_id][pmt.channel_id] = np.asarray([
             pmt.pos_x, pmt.pos_y, pmt.pos_z, pmt.dir_x, pmt.dir_y,
             pmt.dir_z, pmt.t0, pmt.du, pmt.floor
         ], dtype=np.float64)
     self._calib_by_dom_and_channel = data
     if HAVE_NUMBA:
         self._lookup_tables = [(dom, cal) for dom, cal in data.items()]
Example no. 45
 def _create_pmt_id_lookup(self):
     if HAVE_NUMBA:
         from numba.typed import Dict
         from numba import types
         data = Dict.empty(key_type=types.i8, value_type=types.float64[:])
     else:
         data = {}
     for pmt in self.detector.pmts:
         data[pmt.pmt_id] = np.asarray([
             pmt.pos_x, pmt.pos_y, pmt.pos_z,
             pmt.dir_x, pmt.dir_y, pmt.dir_z,
             pmt.t0, pmt.du, pmt.floor,
         ], dtype=np.float64)
     self._calib_by_pmt_id = data
Example no. 46
def ex_typed_dict_from_cpython():
    # magictoken.ex_typed_dict_from_cpython.begin
    import numpy as np
    from numba import njit
    from numba import types
    from numba.typed import Dict

    # Dict.empty() constructs a typed dictionary.
    # The key and value types must be explicitly declared.
    d = Dict.empty(
        key_type=types.unicode_type,
        value_type=types.float64[:],
    )

    # The typed-dict can be used from the interpreter.
    d['posx'] = np.asarray([1, 0.5, 2], dtype='f8')
    d['posy'] = np.asarray([1.5, 3.5, 2], dtype='f8')
    d['velx'] = np.asarray([0.5, 0, 0.7], dtype='f8')
    d['vely'] = np.asarray([0.2, -0.2, 0.1], dtype='f8')

    # Here's a function that expects a typed-dict as the argument
    @njit
    def move(d):
        # inplace operations on the arrays
        d['posx'] += d['velx']
        d['posy'] += d['vely']

    print('posx: ', d['posx'])  # Out: posx:  [1.  0.5 2. ]
    print('posy: ', d['posy'])  # Out: posy:  [1.5 3.5 2. ]

    # Call move(d) to inplace update the arrays in the typed-dict.
    move(d)

    print('posx: ', d['posx'])  # Out: posx:  [1.5 0.5 2.7]
    print('posy: ', d['posy'])  # Out: posy:  [1.7 3.3 2.1]
    # magictoken.ex_typed_dict_from_cpython.end

    # Test
    np.testing.assert_array_equal(d['posx'], [1.5, 0.5, 2.7])
    np.testing.assert_array_equal(d['posy'], [1.7, 3.3, 2.1])
Example no. 47
 def inner_numba_dict():
     d = Dict.empty(
         key_type=types.intp,
         value_type=types.intp,
     )
     return d
Example no. 48
 def make_dict():
     return Dict.empty(kt, vt)
Example no. 49
 def foo():
     Dict.empty(types.intp, ty)
Example no. 50
 def producer():
     d = Dict.empty(int32, float64)
     d[1] = 1.23
     return d
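A minimal sketch (not from the original test) of a typed dict produced in one jitted function and consumed in another:

from numba import njit, int32, float64
from numba.typed import Dict

@njit
def producer():
    d = Dict.empty(int32, float64)
    d[1] = 1.23
    return d

@njit
def consumer(d):
    # The typed dict crosses the JIT boundary without copying.
    return d[1]

assert consumer(producer()) == 1.23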
Example no. 51
 def call_ctor():
     return Dict.empty(kt, vt)