def forward(self, input_bit_width: Tensor) -> Tensor:
    """Return the effective bit width after removal, clamped to the overall range.

    The amount to remove comes from ``self.bit_width_to_remove_impl()``;
    the absolute difference from ``input_bit_width`` is then clamped
    (via ``tensor_clamp_ste`` — presumably a straight-through-estimator
    clamp, judging by the name) between the values produced by the
    ``min_overall_bit_width`` / ``max_overall_bit_width`` callables.
    """
    removed = self.bit_width_to_remove_impl()
    # abs() keeps the result non-negative even if removal exceeds the input width
    remaining = torch.abs(input_bit_width - removed)
    return tensor_clamp_ste(
        remaining,
        self.min_overall_bit_width(),
        self.max_overall_bit_width())
def forward(self, input_bit_width: Tensor, zero_hw_sentinel: Tensor) -> Tensor:
    """Return the effective bit width after removal, clamped to the overall range.

    ``zero_hw_sentinel`` is threaded through to ``bit_width_to_remove_impl``
    and added to the (scalar attribute) clamp bounds — presumably to keep the
    bounds on the right device/graph; confirm against the caller.
    """
    removed = self.bit_width_to_remove_impl(zero_hw_sentinel)
    # abs() keeps the result non-negative even if removal exceeds the input width
    remaining = torch.abs(input_bit_width - removed)
    lower = self.min_overall_bit_width + zero_hw_sentinel
    upper = self.max_overall_bit_width + zero_hw_sentinel
    # TODO: apply the straight-through estimator to the max bound only
    return tensor_clamp_ste(remaining, lower, upper)
def forward(self, x: torch.Tensor, min_val: torch.Tensor, max_val: torch.Tensor):
    """Clamp ``x`` element-wise into ``[min_val, max_val]``.

    Thin wrapper around ``tensor_clamp_ste`` (by its name, a clamp with a
    straight-through gradient estimator — defined elsewhere in the project).
    """
    clamped = tensor_clamp_ste(x, min_val, max_val)
    return clamped