def hybrid_forward(self,
                   F: ModuleType,
                   x: Union[NDArray, Symbol],
                   gradient_rescaler: Union[NDArray, Symbol]) -> Tuple[Union[NDArray, Symbol], ...]:
    """
    Overrides gluon.HybridBlock.hybrid_forward
    :param nd or sym F: ndarray or symbol module
    :param x: head input
    :param gradient_rescaler: gradient rescaler for partial blocking of gradient
    :return: head output
    """
    grad_scaled_x = (F.broadcast_mul(1 - gradient_rescaler, F.BlockGrad(x)) +
                     F.broadcast_mul(gradient_rescaler, x))
    out = self.head(grad_scaled_x)
    return out
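# The following is a minimal sketch (not part of the source) illustrating why the
# weighted sum above rescales the gradient without changing the forward value:
# BlockGrad stops the gradient through the first term, so the output equals x in
# the forward pass while x receives only the fraction of the gradient given by
# the rescaler. The concrete rescaler value here is a placeholder.
from mxnet import autograd, nd

x = nd.array([1.0, 2.0, 3.0])
x.attach_grad()
rescaler = nd.array([0.5])  # hypothetical rescaler value

with autograd.record():
    y = nd.broadcast_mul(1 - rescaler, nd.BlockGrad(x)) + nd.broadcast_mul(rescaler, x)
    loss = y.sum()
loss.backward()

print(y)       # forward value is unchanged: [1. 2. 3.]
print(x.grad)  # gradient is scaled by the rescaler: [0.5 0.5 0.5]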
def hybrid_forward(self,
                   F: ModuleType,
                   x: Union[NDArray, Symbol],
                   gradient_rescaler: Union[NDArray, Symbol]) -> Tuple[Union[NDArray, Symbol], ...]:
    """
    Overrides gluon.HybridBlock.hybrid_forward
    :param nd or sym F: ndarray or symbol module
    :param x: head input
    :param gradient_rescaler: gradient rescaler for partial blocking of gradient
    :return: head output
    """
    if self._onnx:
        # ONNX doesn't support the BlockGrad() operator, but it is typically not needed
        # here because the ONNX-exported network is mostly used for forward calls only.
        grad_scaled_x = x
    else:
        grad_scaled_x = (F.broadcast_mul(1 - gradient_rescaler, F.BlockGrad(x)) +
                         F.broadcast_mul(gradient_rescaler, x))
    out = self.head(grad_scaled_x)
    return out
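# A minimal sketch (assumed, not taken from the source) of how the enclosing
# HybridBlock might wire up the self.head sub-network and the self._onnx flag
# used above; the class name, constructor arguments, and layer choice are
# placeholders, not the library's actual head implementation.
from mxnet import gluon

class GradientRescaledHead(gluon.HybridBlock):  # hypothetical class name
    def __init__(self, num_outputs: int, onnx: bool = False, **kwargs) -> None:
        super().__init__(**kwargs)
        self._onnx = onnx
        with self.name_scope():
            # Any sub-network can serve as the head; a single Dense layer stands in here.
            self.head = gluon.nn.Dense(num_outputs)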