def __init__(self):
    """Build a searchable LeNet-style CNN.

    The width of the first conv, both downsampling stages, and a
    shortcut around ``fcplus`` are left open as search-space choices.
    """
    super().__init__()
    # searchable output width of the first conv (also conv2's input width)
    width = nn.ValueChoice([4, 6, 8])
    self.conv1 = nn.Conv2d(1, width, 5)
    # first downsampling stage: pick max- or average-pooling
    self.pool1 = nn.LayerChoice([
        nn.MaxPool2d((2, 2)),
        nn.AvgPool2d((2, 2)),
    ])
    self.conv2 = nn.Conv2d(width, 16, 5)
    # second downsampling stage: pooling or a strided conv
    self.pool2 = nn.LayerChoice([
        nn.MaxPool2d(2),
        nn.AvgPool2d(2),
        nn.Conv2d(16, 16, 2, 2),
    ])
    self.fc1 = nn.Linear(16 * 5 * 5, 120)  # 5x5 spatial size after the conv/pool stack
    self.fc2 = nn.Linear(120, 84)
    self.fcplus = nn.Linear(84, 84)
    # choose one of two inputs: with or without the extra fcplus layer
    self.shortcut = nn.InputChoice(2, 1)
    self.fc3 = nn.Linear(84, 10)
def __init__(self, pool_type, C, kernel_size, stride, padding, affine=True):
    """Pooling layer followed by batch normalization.

    Args:
        pool_type: ``'max'`` or ``'avg'`` (case-insensitive).
        C: number of channels, fed to ``BatchNorm2d``.
        kernel_size: pooling kernel size.
        stride: pooling stride.
        padding: pooling padding.
        affine: whether the batch norm has learnable affine parameters.

    Raises:
        ValueError: if ``pool_type`` is neither ``'max'`` nor ``'avg'``.
    """
    super().__init__()
    pool_type = pool_type.lower()  # normalize once instead of per-branch
    if pool_type == 'max':
        self.pool = nn.MaxPool2d(kernel_size, stride, padding)
    elif pool_type == 'avg':
        # count_include_pad=False keeps border averages unbiased by zero padding
        self.pool = nn.AvgPool2d(kernel_size, stride, padding, count_include_pad=False)
    else:
        # previously raised a bare ValueError() with no diagnostic message
        raise ValueError(f"pool_type must be 'max' or 'avg', got {pool_type!r}")
    self.bn = nn.BatchNorm2d(C, affine=affine)
def __init__(self, input_size, C, n_classes):
    """Auxiliary classifier head over a 7x7 or 8x8 feature map.

    Args:
        input_size: spatial size of the incoming feature map; must be 7 or 8.
        C: number of input channels.
        n_classes: number of output classes.

    Raises:
        ValueError: if ``input_size`` is neither 7 nor 8.
    """
    # validate explicitly: an `assert` here would be silently stripped under -O
    if input_size not in (7, 8):
        raise ValueError(f'input_size must be 7 or 8, got {input_size}')
    super().__init__()
    self.net = nn.Sequential(
        nn.ReLU(inplace=True),
        # stride of input_size - 5 reduces either 7x7 or 8x8 down to 2x2
        nn.AvgPool2d(5, stride=input_size - 5, padding=0, count_include_pad=False),  # 2x2 out
        nn.Conv2d(C, 128, kernel_size=1, bias=False),
        nn.BatchNorm2d(128),
        nn.ReLU(inplace=True),
        nn.Conv2d(128, 768, kernel_size=2, bias=False),  # 1x1 out
        nn.BatchNorm2d(768),
        nn.ReLU(inplace=True),
    )
    self.linear = nn.Linear(768, n_classes)
def __init__(self, C: int, num_labels: int, dataset: Literal['imagenet', 'cifar']):
    """Auxiliary classification head with a dataset-dependent pooling stride.

    Args:
        C: number of input channels.
        num_labels: number of output classes.
        dataset: ``'imagenet'`` or ``'cifar'``; selects the pooling stride.

    Raises:
        ValueError: if ``dataset`` is not one of the two supported values.
    """
    super().__init__()
    if dataset == 'imagenet':
        # assuming input size 14x14
        stride = 2
    elif dataset == 'cifar':
        stride = 3
    else:
        # previously an unknown dataset fell through both branches and
        # crashed later with an unrelated NameError on `stride`
        raise ValueError(f"dataset must be 'imagenet' or 'cifar', got {dataset!r}")
    self.features = nn.Sequential(
        nn.ReLU(inplace=True),
        # count_include_pad=False keeps border averages unbiased by zero padding
        nn.AvgPool2d(5, stride=stride, padding=0, count_include_pad=False),
        nn.Conv2d(C, 128, 1, bias=False),
        nn.BatchNorm2d(128),
        nn.ReLU(inplace=True),
        nn.Conv2d(128, 768, 2, bias=False),
        nn.BatchNorm2d(768),
        nn.ReLU(inplace=True))
    self.classifier = nn.Linear(768, num_labels)
def __init__(self, input_size=224, first_conv_channels=16, last_conv_channels=1024,
             n_classes=1000, affine=False):
    """Build the ShuffleNet-style supernet backbone.

    Layout: stem conv -> four searchable stages (built by ``_make_blocks``)
    -> 1x1 conv -> global average pool -> dropout -> linear classifier.

    Args:
        input_size: input image resolution; must be a multiple of 32.
        first_conv_channels: output channels of the stem conv.
        last_conv_channels: output channels of the final 1x1 conv.
        n_classes: number of classifier outputs.
        affine: whether batch norms carry learnable affine parameters.

    Raises:
        ValueError: if ``input_size`` is not a multiple of 32.
    """
    super().__init__()
    # validate explicitly: an `assert` here would be silently stripped under -O
    if input_size % 32 != 0:
        raise ValueError(f'input_size must be a multiple of 32, got {input_size}')

    self.stage_blocks = [4, 4, 8, 4]
    self.stage_channels = [64, 160, 320, 640]
    self._input_size = input_size
    self._feature_map_size = input_size
    self._first_conv_channels = first_conv_channels
    self._last_conv_channels = last_conv_channels
    self._n_classes = n_classes
    self._affine = affine
    self._layerchoice_count = 0

    # stem: stride-2 3x3 conv halves the spatial resolution
    self.first_conv = nn.Sequential(
        nn.Conv2d(3, first_conv_channels, 3, 2, 1, bias=False),
        nn.BatchNorm2d(first_conv_channels, affine=affine),
        nn.ReLU(inplace=True),
    )
    self._feature_map_size //= 2

    # stacked searchable stages; each stage is built by _make_blocks
    # (defined elsewhere in this class), which presumably also updates
    # _feature_map_size — TODO confirm against its definition
    p_channels = first_conv_channels
    features = []
    for num_blocks, channels in zip(self.stage_blocks, self.stage_channels):
        features.extend(self._make_blocks(num_blocks, p_channels, channels))
        p_channels = channels
    self.features = nn.Sequential(*features)

    self.conv_last = nn.Sequential(
        nn.Conv2d(p_channels, last_conv_channels, 1, 1, 0, bias=False),
        nn.BatchNorm2d(last_conv_channels, affine=affine),
        nn.ReLU(inplace=True),
    )
    # pool over whatever spatial size remains at this point
    self.globalpool = nn.AvgPool2d(self._feature_map_size)
    self.dropout = nn.Dropout(0.1)
    self.classifier = nn.Sequential(
        nn.Linear(last_conv_channels, n_classes, bias=False),
    )
    self._initialize_weights()
from nni.retiarii import model_wrapper from nni.retiarii.oneshot.pytorch.supermodule.sampling import PathSamplingRepeat from nni.retiarii.oneshot.pytorch.supermodule.differentiable import DifferentiableMixedRepeat from .utils.fixed import FixedFactory from .utils.pretrained import load_pretrained_weight # the following are NAS operations from # https://github.com/facebookresearch/unnas/blob/main/pycls/models/nas/operations.py OPS = { 'none': lambda C, stride, affine: Zero(stride), 'avg_pool_2x2': lambda C, stride, affine: nn.AvgPool2d( 2, stride=stride, padding=0, count_include_pad=False), 'avg_pool_3x3': lambda C, stride, affine: nn.AvgPool2d( 3, stride=stride, padding=1, count_include_pad=False), 'avg_pool_5x5': lambda C, stride, affine: nn.AvgPool2d( 5, stride=stride, padding=2, count_include_pad=False), 'max_pool_2x2': lambda C, stride, affine: nn.MaxPool2d(2, stride=stride, padding=0), 'max_pool_3x3': lambda C, stride, affine: nn.MaxPool2d(3, stride=stride, padding=1), 'max_pool_5x5': lambda C, stride, affine: nn.MaxPool2d(5, stride=stride, padding=2), 'max_pool_7x7': lambda C, stride, affine: nn.MaxPool2d(7, stride=stride, padding=3), 'skip_connect':
def __init__(self):
    """Wrap a single 3x3 average pool, stride 2, with ceil-mode output rounding."""
    super().__init__()
    self.m = nn.AvgPool2d(kernel_size=3, stride=2, ceil_mode=True)
def __init__(self):
    """Wrap a single 3x3 average pool with stride 2."""
    super().__init__()
    self.m = nn.AvgPool2d(kernel_size=3, stride=2)