def test_rolling_mean(self):
    """Rolling mean over a window of 4 built from the builtin 1-d mean kernel."""
    window_mean = _lowlevel.make_rolling_arrfunc(
        _lowlevel.make_builtin_mean1d_arrfunc('float64', -1), 4)
    data = nd.array([3.0, 2, 1, 3, 8, nd.nan, nd.nan])
    computed = nd.as_py(window_mean(data))
    # First three outputs have no complete window behind them.
    self.assertTrue(np.all(np.isnan(computed[:3])))
    # Last output's window is all-NaN territory.
    self.assertTrue(np.isnan(computed[-1]))
    self.assertEqual(computed[3:-1], [9.0/4, 14.0/4, 12.0/3])
def op_ckernel(self, op):
    """Lower a ckernel op by lifting its wrapped kernel to an arrfunc.

    ``op.args`` is ``[ckernel, dst_arg, src_args...]``. ``ckernel`` is
    either a plain arrfunc (used as-is) or a dict tagged ``'elwise'``,
    ``'reduction'``, ``'rolling'`` or ``'ckfactory'`` describing how the
    contained kernel must be lifted. The lifted arrfunc replaces
    ``op.args[0]`` in place.

    Raises:
        ValueError: for a rolling kernel with ``minp != 0`` (unsupported).
        RuntimeError: for an unknown ckernel tag.
    """
    op_ndim = len(op.type.shape)
    # Dimensionality the environment expects of the result (0 if unset).
    result_ndim = self.env.get('result-ndim', 0)
    ckernel, args = op.args
    in_types = [self.get_arg_type(arg) for arg in args[1:]]
    out_type = ndt.type(str(args[0].type))
    if isinstance(ckernel, dict):
        tag = ckernel['tag']
        if tag == 'elwise':
            ck = ckernel['ckernel']
            if op.metadata['rank'] < op_ndim and \
                    self.env.get('stream-outer', False) and result_ndim == op_ndim:
                # Replace the leading dimension type with 'strided' in each
                # operand if we're streaming it for processing BLZ
                # TODO: Add dynd tp.subarray(N) function like datashape has
                for i, tp in enumerate(in_types):
                    if tp.ndim == result_ndim:
                        in_types[i] = ndt.make_strided_dim(tp.element_type)
                out_type = ndt.make_strided_dim(out_type.element_type)
            op.args[0] = _lowlevel.lift_arrfunc(ck)
        elif tag == 'reduction':
            ck = ckernel['ckernel']
            assoc = ckernel['assoc']
            comm = ckernel['comm']
            ident = ckernel['ident']
            # A provided identity must be wrapped as a dynd array.
            ident = None if ident is None else nd.asarray(ident)
            axis = ckernel['axis']
            keepdims = ckernel['keepdims']
            op.args[0] = _lowlevel.lift_reduction_arrfunc(
                ck, in_types[0],
                axis=axis, keepdims=keepdims,
                associative=assoc, commutative=comm,
                reduction_identity=ident)
        elif tag == 'rolling':
            ck = ckernel['ckernel']
            window = ckernel['window']
            minp = ckernel['minp']
            if minp != 0:
                raise ValueError(
                    'rolling window with minp != 0 not supported yet')
            op.args[0] = _lowlevel.make_rolling_arrfunc(ck, window)
        elif tag == 'ckfactory':
            # Factory builds the kernel from the concrete in/out types.
            ckfactory = ckernel['ckernel_factory']
            ck = ckfactory(out_type, *in_types)
            op.args[0] = ck
        else:
            # Fixed typo: message previously read 'unnrecognized'.
            raise RuntimeError('unrecognized ckernel tag %s' % tag)
    else:
        # Already a bare arrfunc; nothing to lift.
        op.args[0] = ckernel
def test_diff_op(self):
    """First-difference built by rolling a lifted numpy.subtract over pairs."""
    # Wrap the numpy subtract ufunc as an arrfunc for this lifting test.
    sub_af = _lowlevel.arrfunc_from_ufunc(
        np.subtract, (np.float64, np.float64, np.float64), False)
    # Lift it to operate on 1-d inputs.
    pairwise = _lowlevel.lift_reduction_arrfunc(sub_af, 'fixed * float64',
                                                axis=0,
                                                commutative=False,
                                                associative=False)
    # Apply it over a rolling window of two elements.
    rolling_diff = _lowlevel.make_rolling_arrfunc(pairwise, 2)
    values = nd.array([1.5, 3.25, 7, -3.5, 1.25])
    computed = nd.as_py(rolling_diff(values))
    self.assertTrue(np.isnan(computed[0]))
    self.assertEqual(computed[1:],
                     [3.25 - 1.5, 7 - 3.25, -3.5 - 7, 1.25 - -3.5])
# NOTE(review): the two statements below are the tail of a registration
# loop whose header (binding ``name``, ``np_op``, ``ident``, ``typ`` and
# the BlazeFunc ``x``) lies outside this chunk -- indentation here is
# reconstructed; confirm against the full file.
x.add_overload(
    # Signature "(T) -> T" for the loop's current scalar type.
    "(%s) -> %s" % (typ.__name__, typ.__name__),
    _lowlevel.arrfunc_from_ufunc(np_op, (typ,) * 3, False),
    associative=True,
    commutative=True,
    identity=ident,
)
# Publish the built function under its name at module level.
locals()[name] = x

# ------------------------------------------------------------------------
# Other Funcs
# ------------------------------------------------------------------------

rolling_mean = RollingWindowBlazeFunc("blaze", "rolling_mean")
# Builtin 1-d float64 mean kernel (second arg presumably minp=0 -- verify
# against the dynd _lowlevel docs).
mean1d = _lowlevel.make_builtin_mean1d_arrfunc("float64", 0)
rolling_mean.add_overload("(M * float64) -> M * float64", mean1d)

diff = BlazeFunc("blaze", "diff")
# numpy.subtract wrapped as a (float64, float64) -> float64 arrfunc.
subtract_doubles_ck = _lowlevel.arrfunc_from_ufunc(np.subtract,
                                                   (np.float64, np.float64,
                                                    np.float64),
                                                   False)
# Lift subtraction over 1-d strided input, then roll it over a window of
# two elements to produce a first-difference kernel.
diff_pair_ck = _lowlevel.lift_reduction_arrfunc(
    subtract_doubles_ck, "strided * float64",
    axis=0, commutative=False, associative=False
)
diff_ck = _lowlevel.make_rolling_arrfunc(diff_pair_ck, 2)
diff.add_overload("(M * float64) -> M * float64", diff_ck)

take = BlazeFunc("blaze", "take")
# Masked take
take.add_overload("(M * T, M * bool) -> var * T",
                  _lowlevel.make_take_arrfunc())
# Indexed take
take.add_overload("(M * T, N * intptr) -> N * T",
                  _lowlevel.make_take_arrfunc())