def ugly_workaround(grid, C, points, out=None, order=1, diff="None",
                    extrap_mode="linear"):
    return (literally(order), literally(diff), literally(extrap_mode))

def __eval_cubic(grid, C, points):
    # print("We allocate with default extrapolation.")
    return lambda grid, C, points: eval_spline(
        grid, C, points, order=literally(3),
        extrap_mode=literally('linear'), diff=literally("None"))

def __eval_cubic(grid, C, points, out):
    return lambda grid, C, points, out: eval_spline(
        grid, C, points, out=out, order=literally(3),
        diff=literally("None"), extrap_mode=literally('linear'))

def convert_inplace(self, series, allow_missing=False):
    """Convert IDs to indices in-place.

    series is a Pandas series of IDs. If allow_missing is set to True,
    then unrecognised IDs do not raise KeyError, but are instead
    replaced with SENTINEL.
    """
    arr = series.to_numpy()
    # nb.literally makes Numba compile once for every value.
    _convert_in2ind_inplace(
        self.id2ind, self.ind2id_map.ind2id,
        nb.literally(len(self.id2ind)), arr,
        nb.literally(allow_missing))

def ol_bar(d): a = { "A": 1, "B": 1, "C": 1, "D": 1, "E": 1, "F": 1, "G": 1, "H": 1, "I": 1, "J": 1, "K": 1, "L": 1, "M": 1, "N": 1, "O": 1, "P": 1, "Q": 1, "R": 1, "S": 7, } if d.initial_value is None: return lambda d: literally(d) self.assertTrue(isinstance(d, types.DictType)) self.assertEqual(d.initial_value, a) return lambda d: d
def ol_bar(l):
    if l.initial_value is None:
        return lambda l: literally(l)
    self.assertTrue(isinstance(l, types.List))
    self.assertEqual(l.initial_value, [1, 2, 3])
    self.assertEqual(hasattr(l, 'literal_value'), False)
    return lambda l: l

def __eval_cubic(grid, C, points, out, extrap_mode):
    if extrap_mode == t_NEAREST:
        extrap_ = literally('nearest')
    elif extrap_mode == t_CONSTANT:
        extrap_ = literally('constant')
    elif extrap_mode == t_cubic:
        extrap_ = literally('cubic')
    else:
        return None
    return lambda grid, C, points, out, extrap_mode: eval_spline(
        grid, C, points, out=out, order=literally(3),
        diff=literally("None"), extrap_mode=extrap_)

def eval_spline(grid, C, points, out=None, order=1, diff="None",
                extrap_mode="linear"):
    """Do I get a docstring ?"""
    dd = numba.literally(diff)
    k = numba.literally(order)
    extrap_ = numba.literally(extrap_mode)
    return _eval_spline(grid, C, points, out=out, order=k, diff=dd,
                        extrap_mode=extrap_)

def __eval_cubic(grid, C, points, extrap_mode):
    # print(f"We are going to extrapolate in {extrap_mode} mode.")
    if extrap_mode == t_NEAREST:
        extrap_ = literally("nearest")
    elif extrap_mode == t_CONSTANT:
        extrap_ = literally("constant")
    elif extrap_mode == t_cubic:
        extrap_ = literally("cubic")
    else:
        return None
    return lambda grid, C, points, extrap_mode: eval_spline(
        grid, C, points, order=literally(3), diff=literally("None"),
        extrap_mode=extrap_)

def ov_power(x, n):
    if isinstance(n, numba.types.Literal):
        # only if `n` is a literal
        if n.literal_value == 2:
            # special case: square
            print("square")
            return lambda x, n: x * x
        elif n.literal_value == 3:
            # special case: cubic
            print("cubic")
            return lambda x, n: x * x * x
    else:
        # If `n` is not literal, request literal dispatch
        return lambda x, n: numba.literally(n)
    print("generic")
    return lambda x, n: x ** n

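# For context: ov_power above mirrors the literal-dispatch example in the
# Numba documentation. A sketch of how it is wired up and exercised (the
# `power` placeholder and its registration follow that example; `test_power`
# matches the snippet at the end of this collection):
import numba
from numba.extending import overload

def power(x, n):
    # Pure-Python fallback; jitted calls use ov_power instead.
    return x ** n

overload(power)(ov_power)

@numba.njit
def test_power(x, n):
    # literally(n) forces re-compilation with `n` as a Literal, so
    # ov_power sees n.literal_value and can pick a special case.
    return power(x, numba.literally(n))

print(test_power(2.0, 3))  # prints "cubic" at compile time; returns 8.0
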
def ol_bar(d): a = { "a1": 1, "a2": 2, "a3": 3, "a4": 4, "a5": 5, "a6": 6, "a7": 7, "a8": 8, "a9": 9, "a10": 10, "a11": 11, "a12": 12, "a13": 13, "a14": 14, "a15": 15, "a16": 16, "a17": 17, "a18": 18, "a19": 19, "a20": 20, "a21": 21, "a22": 22, "a23": 23, "a24": 24, "a25": 25, "a26": 26, "a27": 27, "a28": 28, "a29": 29, "a30": 30, "a31": 31, "a32": 32, "a33": 33, "a34": 34, # 34 items is the limit of # (LOAD_CONST + MAP_ADD)^n + DICT_UPDATE "a35": 35, # 35 Generates an additional BUILD_MAP + DICT_UPDATE } if d.initial_value is None: return lambda d: literally(d) self.assertTrue(isinstance(d, types.DictType)) self.assertEqual(d.initial_value, a) return lambda d: d
def make_id_hash_map(ids, path, offset=0):
    """Make ID to index hash map.

    ids is an ordered NumPy array of IDs. path is the destination.
    offset is added to the indices before storing.
    """
    with open(path, 'wb+') as f:
        # Trick: seek past the end of file and write one byte to
        # efficiently zero-fill up to that point.
        f.seek(get_hash_map_size(len(ids)) * np.uint32().nbytes - 1)
        f.write(b'\x00')
        f.flush()
        id2ind_mmap = mmap.mmap(f.fileno(), 0)
    with id2ind_mmap:
        id2ind = np.frombuffer(id2ind_mmap, dtype=np.uint32)
        id2ind[...] = SENTINEL  # Important detail: set all to SENTINEL.
        arr_size = len(id2ind)
        # nb.literally causes Numba to compile the function once for
        # every value. Avoids the CPU's slow division instruction.
        _make_hash_map(ids, id2ind, nb.literally(arr_size), offset)

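# A hypothetical sketch of the _make_hash_map kernel referenced above (the
# real kernel lives elsewhere; SENTINEL's value, the linear-probing scheme,
# and non-negative integer IDs are all assumptions). The point of a literal
# arr_size is that `% arr_size` then has a compile-time-constant divisor,
# which LLVM strength-reduces so no hardware division instruction is emitted.
import numba as nb
import numpy as np

SENTINEL = np.uint32(0xFFFFFFFF)  # assumed sentinel value

@nb.njit
def _make_hash_map(ids, id2ind, arr_size, offset):
    # Request literal dispatch: one specialization per distinct table size.
    arr_size = nb.literally(arr_size)
    for i in range(len(ids)):
        slot = ids[i] % arr_size
        # Linear probing: walk forward until an empty (SENTINEL) slot.
        while id2ind[slot] != SENTINEL:
            slot = (slot + 1) % arr_size
        id2ind[slot] = np.uint32(i + offset)
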
def sdc_pandas_dataframe_getitem_idx_unicode_str_impl(self, idx):
    # just call literally as it will raise and compilation will
    # continue via the common impl
    return literally(idx)

def pyfunc(tup, idx):
    idx = literally(idx)
    return tup[idx]

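# A minimal runnable sketch of the pattern above: forcing the index to an
# IntegerLiteral lets Numba index a heterogeneous tuple, whose element type
# can only be resolved when the index is a compile-time constant. The
# `get_item` name is illustrative.
import numba

@numba.njit
def get_item(tup, idx):
    idx = numba.literally(idx)  # re-dispatch with idx as a Literal
    return tup[idx]

print(get_item((1, 2.5, 3j), 1))  # -> 2.5
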
def ol_bar(d):
    if d.initial_value is None:
        return lambda d: literally(d)
    self.assertTrue(isinstance(d, types.List))
    self.assertEqual(d.initial_value, [1, 2, 3])
    return lambda d: d

def full_slice_array(a, n):
    # Since numba slices can't be boxed at the moment
    return a[build_full_slice_tuple(literally(n))]

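# A hedged usage sketch for the helper above, assuming build_full_slice_tuple
# comes from numba.cpython.unsafe.tuple (its home has moved between Numba
# versions). It builds (slice(None),) * n as a compile-time tuple, which
# requires `n` to be an IntegerLiteral -- hence the literally(n).
import numpy as np
from numba import njit, literally
from numba.cpython.unsafe.tuple import build_full_slice_tuple

@njit
def full_slice_copy(a, n):
    return a[build_full_slice_tuple(literally(n))]

print(full_slice_copy(np.arange(6.0).reshape(2, 3), 2).shape)  # (2, 3)
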
def foo(x):
    return numba.literally(x)

def ol_specialize(x):
    iv = x.initial_value
    if iv is None:
        return lambda x: literally(x)  # Force literal dispatch
    assert iv == [1, 2, 3]  # INITIAL VALUE
    return lambda x: x

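# A hedged driver for the overload above, modelled on the initial-value
# example in the Numba documentation (the plain `specialize` function and
# its registration are assumptions): the first typing pass sees
# initial_value None and raises via literally(); the retyped pass captures
# the build-time value [1, 2, 3].
from numba import njit
from numba.extending import overload

def specialize(x):
    pass  # pure-Python placeholder

overload(specialize)(ol_specialize)

@njit
def call_specialize():
    l = [1, 2, 3]
    specialize(l)

call_specialize()
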
def gridder(uvw, vis, wavelengths, chanmap, npix, cell, image_centre,
            phase_centre, convolution_kernel, convolution_kernel_width,
            convolution_kernel_oversampling, baseline_transform_policy,
            phase_transform_policy, stokes_conversion_policy,
            convolution_policy, grid_dtype=np.complex128,
            do_normalize=False):
    """
    2D Convolutional gridder, contiguous to discrete

    @uvw: UVW coordinates, (nrow, 3)
    @vis: complex data, (nrow, nchan, ncorr)
    @wavelengths: wavelengths of data channels
    @chanmap: MFS band mapping
    @npix: number of pixels per axis
    @cell: cell size in degrees
    @image_centre: new phase centre of image (radians, ra, dec)
    @phase_centre: original phase centre of data (radians, ra, dec)
    @convolution_kernel: packed kernel as generated by kernels package
    @convolution_kernel_width: number of taps in kernel
    @convolution_kernel_oversampling: number of oversampled points in kernel
    @baseline_transform_policy: any accepted policy in
        .policies.baseline_transform_policies, can be used to tilt image
        planes for polyhedron faceting
    @phase_transform_policy: any accepted policy in
        .policies.phase_transform_policies, can be used to facet at the
        provided facet @image_centre
    @stokes_conversion_policy: any accepted correlation-to-Stokes
        conversion policy in .policies.stokes_conversion_policies
    @convolution_policy: any accepted convolution policy in
        .policies.convolution_policies
    @grid_dtype: accumulation grid dtype (default complex128)
    @do_normalize: normalize grid by convolution weights
    """
    if chanmap.size != wavelengths.size:
        raise ValueError(
            "Chanmap and corresponding wavelengths must match in shape")
    chanmap = chanmap.ravel()
    wavelengths = wavelengths.ravel()
    nband = np.max(chanmap) + 1
    nrow, nvischan, ncorr = vis.shape
    if uvw.shape[1] != 3:
        raise ValueError("UVW array must be an array of triples")
    if uvw.shape[0] != nrow:
        raise ValueError(
            "UVW array must have same number of rows as vis array")
    if nvischan != wavelengths.size:
        raise ValueError("Chanmap must correspond to visibility channels")

    gridstack = np.zeros((nband, npix, npix), dtype=grid_dtype)
    # Scale the FOV using the similarity theorem.
    scale_factor = npix * cell / 3600.0 * np.pi / 180.0
    wt_ch = np.zeros(nband, dtype=np.float64)
    for r in range(nrow):
        ra0, dec0 = phase_centre
        ra, dec = image_centre
        ptp.policy(vis[r, :, :], uvw[r, :], wavelengths, ra0, dec0,
                   ra, dec, policy_type=literally(phase_transform_policy),
                   phasesign=1.0)
        btp.policy(uvw[r, :], ra0, dec0, ra, dec,
                   literally(baseline_transform_policy))
        for c in range(nvischan):
            scaled_u = uvw[r, 0] * scale_factor / wavelengths[c]
            scaled_v = uvw[r, 1] * scale_factor / wavelengths[c]
            scaled_w = uvw[r, 2] * scale_factor / wavelengths[c]
            grid = gridstack[chanmap[c], :, :]
            wt_ch[chanmap[c]] += cp.policy(
                scaled_u, scaled_v, scaled_w, npix, grid, vis, r, c,
                convolution_kernel, convolution_kernel_width,
                convolution_kernel_oversampling,
                literally(stokes_conversion_policy),
                policy_type=literally(convolution_policy))
    if do_normalize:
        for c in range(nband):
            gridstack[c, :, :] /= wt_ch[c] + 1.0e-8
    return gridstack

def impl(self, dtype):
    return astype(self, literally(dtype))

def numba_reduce(reduce_op, x, axis, keepdims=False) -> np.ndarray:
    """Reduce an array along the given axes.

    Parameters
    ----------
    reduce_op
        The reduce operation to call for each element of the output
        array. The input to reduce_op is a flattened, contiguous array
        representing the window upon which reduce_op operates. It
        returns a scalar.
    x : np.ndarray
        The array to reduce.
    axis : ArrayLike
        The axis along which to reduce. This can be multiple axes.
    keepdims : bool
        If ``True``, keep the dimensions along which the array was
        reduced. If ``False``, squeeze the output array. Currently only
        ``True`` is supported.

    Returns
    -------
    out_array : np.ndarray
        The reduced array.
    """

    @register_jitable
    def impl_keepdims(reduce_op, x, axis, keepdims=False):
        axis = np.atleast_1d(np.asarray(axis))
        mask = np.zeros(x.ndim, dtype=np.bool_)
        mask[axis] = True

        original_shape = np.array(x.shape)
        squeezed_shape = original_shape[~mask]

        # This could be reversed, but we are calling a reduction op on
        # it anyway.
        new_axes = -np.arange(1, axis.size + 1)

        # Note that this will copy if reduction happens along a
        # non-contiguous axis.
        x_work = np.moveaxis(x, axis, new_axes)
        x_work = np.ascontiguousarray(x_work)

        total_reduce = np.prod(original_shape[axis])
        total_keep = np.prod(squeezed_shape)
        tmp_shape = to_fixed_tuple(np.array((total_keep, total_reduce)), 2)
        x_work = np.reshape(x_work, tmp_shape)

        result = np.empty((total_keep,), dtype=x_work.dtype)
        for idx in range(result.size):
            result[idx] = reduce_op(x_work[idx, ...])

        new_shape = original_shape.copy()
        new_shape[axis] = 1
        new_shape_tuple = to_fixed_tuple(new_shape, x.ndim)
        return np.reshape(result, new_shape_tuple)

    @register_jitable
    def impl_dropdims(reduce_op, x, axis, keepdims=False):
        axis = np.atleast_1d(np.asarray(axis))
        if axis.size > 1:
            raise NotImplementedError("Numba can't np.squeeze yet.")
        result = impl_keepdims(reduce_op, x, axis)
        result = np.moveaxis(result, axis, 0)
        return result[0, ...]

    if numba.literally(keepdims).literal_value:
        return impl_keepdims
    else:
        return impl_dropdims

def foo_noop(dtype):
    return literally(dtype)

def degridder(uvw, gridstack, wavelengths, chanmap, cell, image_centre,
              phase_centre, convolution_kernel, convolution_kernel_width,
              convolution_kernel_oversampling, baseline_transform_policy,
              phase_transform_policy, stokes_conversion_policy,
              convolution_policy, vis_dtype=np.complex128):
    """
    2D Convolutional degridder, discrete to contiguous

    @uvw: UVW coordinates, (nrow, 3)
    @gridstack: complex gridded data, (nband, npix, npix)
    @wavelengths: wavelengths of data channels
    @chanmap: MFS band mapping per channel
    @cell: cell size in degrees
    @image_centre: new phase centre of image (radians, ra, dec)
    @phase_centre: original phase centre of data (radians, ra, dec)
    @convolution_kernel: packed kernel as generated by kernels package
    @convolution_kernel_width: number of taps in kernel
    @convolution_kernel_oversampling: number of oversampled points in kernel
    @baseline_transform_policy: any accepted policy in
        .policies.baseline_transform_policies, can be used to tilt image
        planes for polyhedron faceting
    @phase_transform_policy: any accepted policy in
        .policies.phase_transform_policies, can be used to facet at the
        provided facet @image_centre
    @stokes_conversion_policy: any accepted correlation-to-Stokes
        conversion policy in .policies.stokes_conversion_policies
    @convolution_policy: any accepted convolution policy in
        .policies.convolution_policies
    @vis_dtype: accumulation vis dtype (default complex128)
    """
    if chanmap.size != wavelengths.size:
        raise ValueError(
            "Chanmap and corresponding wavelengths must match in shape")
    chanmap = chanmap.ravel()
    wavelengths = wavelengths.ravel()
    nband = np.max(chanmap) + 1
    nrow = uvw.shape[0]
    npix = gridstack.shape[1]
    if gridstack.shape[1] != gridstack.shape[2]:
        raise ValueError("Grid must be square")
    nvischan = wavelengths.size
    ncorr = scp.ncorr_out(policy_type=literally(stokes_conversion_policy))
    if gridstack.shape[0] < nband:
        raise ValueError(
            "Not enough channel bands in grid stack to match mfs band "
            "mapping")
    if uvw.shape[1] != 3:
        raise ValueError("UVW array must be an array of triples")
    if uvw.shape[0] != nrow:
        raise ValueError(
            "UVW array must have same number of rows as vis array")
    if nvischan != wavelengths.size:
        raise ValueError("Chanmap must correspond to visibility channels")

    vis = np.zeros((nrow, nvischan, ncorr), dtype=vis_dtype)
    # Scale the FOV using the similarity theorem.
    scale_factor = npix * cell / 3600.0 * np.pi / 180.0
    for r in prange(nrow):
        degridder_row_kernel(
            uvw, gridstack, wavelengths, chanmap, cell, image_centre,
            phase_centre, convolution_kernel, convolution_kernel_width,
            convolution_kernel_oversampling,
            literally(baseline_transform_policy),
            literally(phase_transform_policy),
            literally(stokes_conversion_policy),
            literally(convolution_policy),
            vis_dtype=vis_dtype, nband=nband, nrow=nrow, npix=npix,
            nvischan=nvischan, ncorr=ncorr, vis=vis,
            scale_factor=scale_factor, r=r)
    return vis

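# The gridder/degridder above route policy names through literally() so the
# policy modules can branch on the string at compile time. A minimal,
# self-contained sketch of that dispatch pattern (the `apply_policy`
# function and its two toy policies are assumptions, not africanus API):
from numba import njit, types, literally
from numba.extending import overload

def apply_policy(x, policy_type):
    pass  # pure-Python placeholder

@overload(apply_policy)
def apply_policy_impl(x, policy_type):
    if isinstance(policy_type, types.StringLiteral):
        if policy_type.literal_value == "double":
            return lambda x, policy_type: 2.0 * x
        elif policy_type.literal_value == "square":
            return lambda x, policy_type: x * x
        raise ValueError("Unknown policy")
    # Not a literal yet: literally() raises ForceLiteralArg so typing
    # restarts with policy_type as a StringLiteral.
    return lambda x, policy_type: literally(policy_type)

@njit
def run(x, policy_type):
    return apply_policy(x, policy_type)

print(run(3.0, "square"))  # -> 9.0
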
def ol_specialize(x):
    iv = x.initial_value
    if iv is None:
        return lambda x: literally(x)  # Force literal dispatch
    assert iv == {'a': 1, 'b': 2, 'c': 3}  # INITIAL VALUE
    return lambda x: literally(x)

def prefilter(grid, V, out=None, k=3):
    return _prefilter(grid, V, numba.literally(k), out=out)

def dummy_getitem_impl(self, idx):
    return literally(idx)

def arrow_reader_get_table_cell_impl(table, col_idx, row_idx, ret):
    return literally(col_idx)

def struct_get_attr_offset(inst, attr):
    return _struct_get_attr_offset(inst, literally(attr))

def impl(self, dtype):
    return literally(dtype)

def test_power(x, n):
    return power(x, numba.literally(n))