def __init__(self, in_channels):
    """Build a small searchable CNN: fixed stem conv, two layer-choice slots,
    global average pooling, and a single-output linear head."""
    super().__init__()
    self.conv1 = nn.Conv2d(in_channels, 10, 3)
    # Slot 1: pick between a 3x3 conv and a 3x3 max-pool.
    self.conv2 = nn.LayerChoice([nn.Conv2d(10, 10, 3), nn.MaxPool2d(3)])
    # Slot 2: pick between a pass-through and a 1x1 conv.
    self.conv3 = nn.LayerChoice([nn.Identity(), nn.Conv2d(10, 10, 1)])
    self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
    self.fc = nn.Linear(10, 1)
def __init__(self):
    """Single searchable conv (3x3 vs padded 5x5) followed by a 2x2 max-pool."""
    super().__init__()
    # Both candidates map 3 input channels to 1 output channel.
    candidates = [
        nn.Conv2d(3, 1, 3),
        nn.Conv2d(3, 1, 5, padding=1),
    ]
    self.conv = nn.LayerChoice(candidates)
    self.pool = nn.MaxPool2d(kernel_size=2)
def __init__(self, block, layers, num_classes=1000):
    """Standard ResNet trunk: stem -> four residual stages -> global pool -> classifier.

    Args:
        block: residual block class; ``block.expansion`` scales the final feature width.
        layers: blocks per stage (length-4 sequence).
        num_classes: output size of the classification head.
    """
    super(ResNet, self).__init__()
    self.inplanes = 64
    # Stem: 7x7/2 conv + BN + ReLU + 3x3/2 max-pool (4x spatial downsample overall).
    self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
    self.bn1 = nn.BatchNorm2d(64)
    self.relu = nn.ReLU(inplace=True)
    self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
    # Residual stages; stages 2-4 halve resolution while doubling width.
    self.layer1 = self._make_layer(block, 64, layers[0])
    self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
    self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
    self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
    self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
    self.fc = nn.Linear(512 * block.expansion, num_classes)
    # He initialization for convolutions, constant init for BN affine parameters.
    for module in self.modules():
        if isinstance(module, nn.Conv2d):
            torch.nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
        elif isinstance(module, nn.BatchNorm2d):
            torch.nn.init.constant_(module.weight, 1)
            torch.nn.init.constant_(module.bias, 0)
def __init__(self):
    """LeNet-style search space: a value-choice conv width, two layer-choice
    pooling slots, and an input-choice shortcut around an extra fc layer."""
    super().__init__()
    # Searchable number of channels produced by the first conv.
    width = nn.ValueChoice([4, 6, 8])
    self.conv1 = nn.Conv2d(1, width, 5)
    self.pool1 = nn.LayerChoice([nn.MaxPool2d((2, 2)), nn.AvgPool2d((2, 2))])
    self.conv2 = nn.Conv2d(width, 16, 5)
    self.pool2 = nn.LayerChoice([
        nn.MaxPool2d(2),
        nn.AvgPool2d(2),
        nn.Conv2d(16, 16, 2, 2),
    ])
    self.fc1 = nn.Linear(16 * 5 * 5, 120)  # 5*5 from image dimension
    self.fc2 = nn.Linear(120, 84)
    self.fcplus = nn.Linear(84, 84)
    # Choose exactly one of two candidate inputs (with or without fcplus).
    self.shortcut = nn.InputChoice(2, 1)
    self.fc3 = nn.Linear(84, 10)
def __init__(self, pool_type, C, kernel_size, stride, padding, affine=True):
    """Pooling operation followed by batch normalization.

    Args:
        pool_type: 'max' or 'avg' (case-insensitive).
        C: channel count for the batch-norm layer.
        kernel_size, stride, padding: forwarded to the pooling op.
        affine: whether the batch norm has learnable affine parameters.

    Raises:
        ValueError: if ``pool_type`` is neither 'max' nor 'avg'.
    """
    super().__init__()
    pool_type = pool_type.lower()  # normalize once instead of per-branch
    if pool_type == 'max':
        self.pool = nn.MaxPool2d(kernel_size, stride, padding)
    elif pool_type == 'avg':
        # Exclude zero padding from the mean so border outputs are not diluted.
        self.pool = nn.AvgPool2d(kernel_size, stride, padding, count_include_pad=False)
    else:
        # Was a bare ValueError(); include the offending value for debuggability.
        raise ValueError(f"pool_type must be 'max' or 'avg', got {pool_type!r}")
    self.bn = nn.BatchNorm2d(C, affine=affine)
# NOTE(review): whitespace-mangled span -- the upstream attribution comment and the
# OPS table were collapsed onto this single '#'-prefixed line, and the dict literal
# is truncated: 'conv_3x3' has no value and the brace is never closed. OPS appears
# to map operation names to factories (C, stride, affine) -> module for a NAS cell;
# recover the full table from the cited upstream file rather than from this line.
# https://github.com/facebookresearch/unnas/blob/main/pycls/models/nas/operations.py OPS = { 'none': lambda C, stride, affine: Zero(stride), 'avg_pool_2x2': lambda C, stride, affine: nn.AvgPool2d( 2, stride=stride, padding=0, count_include_pad=False), 'avg_pool_3x3': lambda C, stride, affine: nn.AvgPool2d( 3, stride=stride, padding=1, count_include_pad=False), 'avg_pool_5x5': lambda C, stride, affine: nn.AvgPool2d( 5, stride=stride, padding=2, count_include_pad=False), 'max_pool_2x2': lambda C, stride, affine: nn.MaxPool2d(2, stride=stride, padding=0), 'max_pool_3x3': lambda C, stride, affine: nn.MaxPool2d(3, stride=stride, padding=1), 'max_pool_5x5': lambda C, stride, affine: nn.MaxPool2d(5, stride=stride, padding=2), 'max_pool_7x7': lambda C, stride, affine: nn.MaxPool2d(7, stride=stride, padding=3), 'skip_connect': lambda C, stride, affine: nn.Identity() if stride == 1 else FactorizedReduce(C, C, affine=affine), 'conv_1x1': lambda C, stride, affine: nn.Sequential( nn.ReLU(inplace=False), nn.Conv2d(C, C, 1, stride=stride, padding=0, bias=False), nn.BatchNorm2d(C, affine=affine)), 'conv_3x3':
def __init__(self):
    """Single 3x3 max-pool, stride 2; ceil_mode rounds output sizes up."""
    super().__init__()
    self.m = nn.MaxPool2d(3, 2, ceil_mode=True)
def __init__(self):
    """Single 5x5 max-pool with asymmetric zero padding (1 vertical, 2 horizontal)."""
    super().__init__()
    self.m = nn.MaxPool2d(5, padding=(1, 2))
def __init__(self):
    """Compose a project-defined ConvRelu submodule with a 2x2 max-pool."""
    super().__init__()
    self.conv = ConvRelu()
    self.pool = nn.MaxPool2d(kernel_size=2)
def __init__(self):
    """Minimal conv -> ReLU -> 2x2 max-pool pipeline (3 -> 1 channel)."""
    super().__init__()
    self.conv = nn.Conv2d(3, 1, 3)
    self.relu = nn.ReLU()
    self.pool = nn.MaxPool2d(kernel_size=2)