def __init__(self, optimized: bool = False):
    """Build the backbone blocks, pooling, 1x1-conv head, and quant stubs.

    Args:
        optimized: when True, build OptimizedConvBNReLU stages whose channel
            counts are rounded to multiples of 8 via ``_make_divisible``;
            otherwise build plain ConvBNReLU stages with the raw counts.
    """
    super().__init__()
    # Per-stage (in_channels, out_channels, stride) configuration.
    block_params = [(3, 18, 1), (18, 36, 2), (36, 74, 1), (74, 146, 2),
                    (146, 290, 1), (290, 578, 2), (578, 1154, 1),
                    (1154, 1154, 2)]
    if optimized:
        first_in, first_out, first_stride = block_params[0]
        # Stem keeps the raw 3 input channels; only its output is rounded.
        blocks = [OptimizedConvBNReLU(in_channels=first_in,
                                      out_channels=_make_divisible(first_out, 8),
                                      stride=first_stride)]
        for cin, cout, step in block_params[1:]:
            blocks.append(OptimizedConvBNReLU(
                in_channels=_make_divisible(cin, 8),
                out_channels=_make_divisible(cout, 8),
                stride=step))
        in_features = _make_divisible(1154, 8)
    else:
        blocks = []
        for cin, cout, step in block_params:
            blocks.append(ConvBNReLU(in_channels=cin,
                                     out_channels=cout,
                                     stride=step))
        in_features = 1154
    self.blocks = nn.ModuleList(blocks)
    self.pooling = nn.AdaptiveAvgPool2d(1)
    # 1x1 conv acts as the final linear classifier over 1000 classes.
    self.classifier = nn.Conv2d(in_channels=in_features,
                                out_channels=1000,
                                kernel_size=(1, 1))
    # Stubs mark the quantize/dequantize boundaries for post-training quant.
    self.quant = torch.quantization.QuantStub()
    self.dequant = torch.quantization.DeQuantStub()
def adjust_channels(channels: int, width_mult: float, min_value: Optional[int] = None) -> int:
    """Scale ``channels`` by ``width_mult`` and round to a multiple of 8.

    Args:
        channels: base channel count.
        width_mult: width multiplier applied before rounding.
        min_value: optional floor forwarded to ``_make_divisible``.

    Returns:
        The rounded channel count produced by ``_make_divisible``.
    """
    scaled = channels * width_mult
    return _make_divisible(scaled, 8, min_value)
def adjust_channels(channels: int, width_mult: float, min_value: Optional[int] = None) -> int:
    """Scale ``channels`` by ``width_mult`` and round to a multiple of 8.

    Generalized (backward-compatibly) to accept ``min_value`` so this
    variant matches the sibling ``adjust_channels`` implementation that
    already forwards a floor to ``_make_divisible``; omitting the argument
    preserves the original behavior exactly.

    Args:
        channels: base channel count.
        width_mult: width multiplier applied before rounding.
        min_value: optional floor forwarded to ``_make_divisible``;
            ``None`` lets the helper choose its default floor.

    Returns:
        The rounded channel count produced by ``_make_divisible``.
    """
    return _make_divisible(channels * width_mult, 8, min_value)
def __init__(self, input_channels: int, squeeze_factor: int = 4):
    """Build the two 1x1 convs of a squeeze-and-excitation bottleneck.

    Args:
        input_channels: channel count of the incoming feature map.
        squeeze_factor: reduction ratio for the bottleneck; the squeezed
            width is ``input_channels // squeeze_factor`` rounded to a
            multiple of 8 via ``_make_divisible``.
    """
    super().__init__()
    reduced = _make_divisible(input_channels // squeeze_factor, 8)
    # fc1 squeezes, fc2 restores the original channel count.
    self.fc1 = nn.Conv2d(input_channels, reduced, 1)
    self.fc2 = nn.Conv2d(reduced, input_channels, 1)