def test_major_axis():
    """Compare the kernel's LRN over the last (major) axis against a NumPy reference.

    Builds the expected output vy by summing squared activations in a window of
    size n along axis 3, then scaling by (k + alpha * local_sum) ** (-beta).
    """
    vx = np.random.rand(10, 6, 4, 8)
    n = 5
    k = 2
    alpha = 1e-4
    beta = 0.75
    np_axis = 3

    vx_squared = vx ** 2.0
    vx_scales = []
    for i in range(vx.shape[np_axis]):
        # e.g. if axis == 2 this selects vx_squared[:, :, i-n//2:i+n//2+1, :]
        # FIX: index with a tuple of slices — indexing an ndarray with a *list*
        # of slices is deprecated in NumPy and an error in recent versions.
        channel_slice = tuple(
            [slice(None)] * np_axis +
            [slice(max(0, i - n // 2), min(i + n // 2 + 1, vx.shape[np_axis]))] +
            [slice(None)] * (vx.ndim - np_axis - 1))
        vx_scales.append(
            np.sum(vx_squared[channel_slice], axis=np_axis, keepdims=True))

    # LRN scale factor applied elementwise to the input.
    vx_scale = (np.concatenate(vx_scales, axis=np_axis) * alpha + k) ** (-beta)
    vy = vx * vx_scale

    x = Variable(vx.shape, order=OrderNHWC)
    y, = LocalResponseNormalization(None, n=n, k=k, alpha=alpha, beta=beta)(x)

    generate_kernel_test_case(
        # plain string: the original f-string had no placeholders
        description="LocalResponseNormalization for major axis",
        backend=["webgpu", "fallback"],
        graph=Graph([x], [y]),
        inputs={x: vx},
        expected={y: vy})
def _convert_local_response_normalization(converter: ChainerConverter, c_op: "chainer.functions.normalization.local_response_normalization.LocalResponseNormalization"):
    """Map a Chainer LRN function node onto a WebDNN LocalResponseNormalization operator."""
    in_var = converter.get_variable(c_op.inputs[0])
    operator = LocalResponseNormalization(
        None, n=c_op.n, k=c_op.k, alpha=c_op.alpha, beta=c_op.beta)
    out_var, = operator(in_var)
    converter.set_variable(c_op.outputs[0](), out_var)
def __call__(self, inputs: List[Variable]) -> Tuple[Variable]:
    """Apply an LRN operator configured from the wrapped Chainer function to inputs[0]."""
    lrn = LocalResponseNormalization(
        generate_unique_name(self.cfunc.label),
        n=self.cfunc.n,
        k=self.cfunc.k,
        alpha=self.cfunc.alpha,
        beta=self.cfunc.beta)
    y, = lrn(inputs[0])
    return y,
def test_every_order():
    """LRN must keep every axis dimension unchanged, regardless of data order."""
    all_orders = (OrderNHWC, OrderHWNC, OrderHWCN, OrderNCHW, OrderCNHW, OrderCHWN)
    for current_order in all_orders:
        lrn = LocalResponseNormalization(None, n=1, k=2, alpha=0.1, beta=0.2)
        x = Variable(np.arange(current_order.ndim) + 1, OrderNHWC)
        x.change_order(current_order)
        y, = lrn(x)
        for ax in y.order.axes:
            assert y.shape_dict[ax] == x.shape_dict[ax]