def __init__(self, params):
    """Build AlexNet (one-weird-trick variant): conv feature extractor + FC classifier."""
    Model.check_parameters(
        params,
        {'name': 'AlexNetOWT', 'input_shape': (3, 227, 227),
         'num_classes': 1000, 'phase': 'training', 'dtype': 'float32'})
    Model.__init__(self, params)
    # Convolutional feature extractor: 3x227x227 input down to 256x6x6.
    conv_stack = [
        nn.Conv2d(3, 64, kernel_size=11, stride=4),
        nn.ReLU(inplace=True),
        nn.MaxPool2d(kernel_size=3, stride=2),
        nn.Conv2d(64, 192, kernel_size=5, padding=2),
        nn.ReLU(inplace=True),
        nn.MaxPool2d(kernel_size=3, stride=2),
        nn.Conv2d(192, 384, kernel_size=3, padding=1),
        nn.ReLU(inplace=True),
        nn.Conv2d(384, 256, kernel_size=3, padding=1),
        nn.ReLU(inplace=True),
        nn.Conv2d(256, 256, kernel_size=3, padding=1),
        nn.ReLU(inplace=True),
        nn.MaxPool2d(kernel_size=3, stride=2),
    ]
    self.features = nn.Sequential(*conv_stack)
    # Fully connected classifier head (flattened 256*6*6 input).
    fc_stack = [
        nn.Linear(256 * 6 * 6, 4096),
        nn.ReLU(inplace=True),
        nn.Dropout(),
        nn.Linear(4096, 4096),
        nn.ReLU(inplace=True),
        nn.Dropout(),
        nn.Linear(4096, self.num_classes),
    ]
    self.classifier = nn.Sequential(*fc_stack)
def __init__(self, params):
    """Apply the common Inception defaults to *params* and initialize the base Model."""
    defaults = {
        'input_shape': (3, 299, 299),
        'num_classes': 1000,
        'phase': 'training',
        'dtype': 'float32',
    }
    Model.check_parameters(params, defaults)
    Model.__init__(self, params)
def __init__(self, params):
    """Construct the ResNet variant selected by ``params['model']``.

    Stage/unit counts come from ``ResNet.specs``; variants with >= 50 layers
    use bottleneck units with a wider filter schedule.
    """
    specs = ResNet.specs[params['model']]
    Model.check_parameters(
        params,
        {'name': specs['name'], 'input_shape': (3, 224, 224),
         'num_classes': 1000, 'phase': 'training', 'dtype': 'float32'})
    Model.__init__(self, params)
    # Adapted from https://github.com/tornadomeet/ResNet/blob/master/train_resnet.py
    # Original author Wei Wu
    bottle_neck = specs['num_layers'] >= 50
    if bottle_neck:
        filter_list = [64, 256, 512, 1024, 2048]
    else:
        filter_list = [64, 64, 128, 256, 512]
    # Stem: 7x7 conv, batch norm, ReLU, 3x3 max pool.
    self.features = nn.Sequential(
        nn.Conv2d(3, filter_list[0], kernel_size=7, stride=2, padding=3, bias=False),
        nn.BatchNorm2d(filter_list[0], eps=2e-5, momentum=0.9, affine=True),
        nn.ReLU(inplace=True),
        nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
    # Number of stages is always 4.
    in_channels = filter_list[0]
    for stage, num_units in enumerate(specs['units']):
        out_channels = filter_list[stage + 1]
        # First unit of each stage changes width and (except stage 1) downsamples;
        # ResnetModule args: num_input_channels, num_filters, stride, dim_match, bottle_neck.
        self.features.add_module(
            'stage%d_unit%d' % (stage + 1, 1),
            ResnetModule(in_channels, out_channels, (1 if stage == 0 else 2),
                         False, bottle_neck))
        in_channels = out_channels
        # Remaining units of the stage preserve shape (dim_match=True, stride 1).
        for unit in range(2, num_units + 1):
            self.features.add_module(
                'stage%d_unit%d' % (stage + 1, unit),
                ResnetModule(in_channels, out_channels, 1, True, bottle_neck))
    # Global 7x7 average pooling before the linear classifier.
    self.features.add_module('pool1', nn.AvgPool2d(kernel_size=7, padding=0))
    self.num_output_channels = filter_list[-1]
    self.classifier = nn.Sequential(
        nn.Linear(self.num_output_channels, self.num_classes))
def __init__(self, params):
    """English acoustic model: five 2048-unit ReLU layers over 540-dim input frames.

    Bug fix: the default 'input_shape' was written as ``(540)``, which is the
    int 540, not a one-element tuple -- ``self.input_shape[0]`` below would
    raise TypeError whenever the default was used. It is now ``(540,)``.
    """
    Model.check_parameters(
        params,
        {'name': 'EngAcousticModel', 'input_shape': (540,),
         'num_classes': 8192, 'phase': 'training', 'dtype': 'float32'})
    Model.__init__(self, params)
    self.model = nn.Sequential()
    prev_size = self.input_shape[0]
    # Five hidden layers, 2048 units each, ReLU activations.
    for idx in range(5):
        self.model.add_module('linear_%d' % idx, nn.Linear(prev_size, 2048))
        self.model.add_module('relu_%d' % idx, nn.ReLU(inplace=True))
        prev_size = 2048
    # Final projection to the output classes (senones).
    self.model.add_module('classifier', nn.Linear(prev_size, self.num_classes))
def __init__(self, params):
    """Assemble InceptionV4: input convs, stem, A/B/C stages with reductions, classifier."""
    Model.check_parameters(params, {'name': 'InceptionV4'})
    BaseInceptionModel.__init__(self, params)
    modules = []
    # Input conv modules
    modules.append(ConvModule(3, num_filters=32, kernel_size=3, stride=2, padding=0))
    modules.append(ConvModule(32, num_filters=32, kernel_size=3, stride=1, padding=0))
    modules.append(ConvModule(32, num_filters=64, kernel_size=3, stride=1, padding=1))
    # Stem modules
    modules.append(self.inception_v4_sa(64, index=0))
    modules.append(self.inception_v4_sb(160, index=0))
    modules.append(self.inception_v4_sc(192, index=0))
    # Four Type A modules
    for i in range(4):
        modules.append(self.inception_v4_a(384, index=i))
    # One Type A Reduction module
    modules.append(self.inception_v4_ra(384, 0, 192, 224, 256, 384))
    # Seven Type B modules
    for i in range(7):
        modules.append(self.inception_v4_b(1024, index=i))
    # One Type B Reduction module
    modules.append(self.inception_v4_rb(1024, index=0))
    # Three Type C modules
    for i in range(3):
        modules.append(self.inception_v4_c(1536, index=i))
    # Final global pooling
    modules.append(nn.AvgPool2d(kernel_size=8, stride=1))
    self.features = nn.Sequential(*modules)
    # Dropout followed by the linear classifier.
    self.classifier = nn.Sequential(
        nn.Dropout(p=0.2),
        nn.Linear(1536, self.num_classes))
def __init__(self, params):
    """SensorNet: a three-layer 1024-unit MLP over 784-dim inputs, 16 output classes.

    Bug fix: the default 'input_shape' was written as ``(784)``, which is the
    int 784, not a one-element tuple -- ``self.input_shape[0]`` below would
    raise TypeError whenever the default was used. It is now ``(784,)``.
    """
    Model.check_parameters(
        params,
        {'name': 'SensorNet', 'input_shape': (784,), 'num_classes': 16,
         'phase': 'training', 'dtype': 'float32'})
    Model.__init__(self, params)
    self.model = nn.Sequential()
    prev_size = self.input_shape[0]
    # Three hidden layers of 1024 units with ReLU activations.
    for idx, layer_size in enumerate([1024, 1024, 1024]):
        self.model.add_module('linear_%d' % idx, nn.Linear(prev_size, layer_size))
        self.model.add_module('relu_%d' % idx, nn.ReLU(inplace=True))
        prev_size = layer_size
    self.model.add_module('classifier', nn.Linear(prev_size, self.num_classes))
def __init__(self, params):
    """Build the Overfeat network: five conv layers plus a three-layer classifier."""
    Model.check_parameters(
        params,
        {'name': 'Overfeat', 'input_shape': (3, 231, 231),
         'num_classes': 1000, 'phase': 'training', 'dtype': 'float32'})
    Model.__init__(self, params)
    conv_stack = [
        # Layer1
        nn.Conv2d(self.input_shape[0], 96, kernel_size=11, stride=4),
        nn.ReLU(inplace=True),
        nn.MaxPool2d(kernel_size=2, stride=2),
        # Layer2
        nn.Conv2d(96, 256, kernel_size=5),
        nn.ReLU(inplace=True),
        nn.MaxPool2d(kernel_size=2, stride=2),
        # Layer3
        nn.Conv2d(256, 512, kernel_size=3, padding=1),
        nn.ReLU(inplace=True),
        # Layer4
        nn.Conv2d(512, 1024, kernel_size=3, padding=1),
        nn.ReLU(inplace=True),
        # Layer5
        nn.Conv2d(1024, 1024, kernel_size=3, padding=1),
        nn.ReLU(inplace=True),
        nn.MaxPool2d(kernel_size=2, stride=2),
    ]
    self.features = nn.Sequential(*conv_stack)
    fc_stack = [
        # Layer6
        nn.Linear(1024 * 6 * 6, 3072),
        nn.ReLU(inplace=True),
        nn.Dropout(),
        # Layer7
        nn.Linear(3072, 4096),
        nn.ReLU(inplace=True),
        nn.Dropout(),
        nn.Linear(4096, self.num_classes),
    ]
    self.classifier = nn.Sequential(*fc_stack)
def __init__(self, params):
    """Construct the VGG variant selected by ``params['model']`` from ``VGG.specs``."""
    specs = VGG.specs[params['model']]
    Model.check_parameters(
        params,
        {'name': specs['name'], 'input_shape': (3, 224, 224),
         'num_classes': 1000, 'phase': 'training', 'dtype': 'float32'})
    Model.__init__(self, params)
    # Features (CNN): each stage is N 3x3 convs (with ReLU) followed by a 2x2 max pool.
    self.features = nn.Sequential()
    layers, filters = specs['specs']
    prev_filters = self.input_shape[0]
    for stage, num_convs in enumerate(layers, start=1):
        num_filters = filters[stage - 1]
        for conv in range(1, num_convs + 1):
            self.features.add_module(
                'conv%d_%d' % (stage, conv),
                nn.Conv2d(prev_filters, num_filters, kernel_size=3, padding=1))
            self.features.add_module('relu%d_%d' % (stage, conv),
                                     nn.ReLU(inplace=True))
            prev_filters = num_filters
        self.features.add_module('pool%d' % stage,
                                 nn.MaxPool2d(kernel_size=2, stride=2))
    # Fully connected layers over the flattened 512x7x7 feature map.
    self.classifier = nn.Sequential(
        nn.Linear(512 * 7 * 7, 4096),
        nn.ReLU(inplace=True),
        nn.Dropout(p=0.5),
        nn.Linear(4096, 4096),
        nn.ReLU(inplace=True),
        nn.Dropout(p=0.5),
        nn.Linear(4096, self.num_classes))
def __init__(self, params):
    """Assemble InceptionV3: stem convs, inception stages A-E, global pool, classifier."""
    Model.check_parameters(params, {'name': 'InceptionV3'})
    BaseInceptionModel.__init__(self, params)
    # Input conv modules
    stem = [
        ConvModule(3, num_filters=32, kernel_size=3, stride=2, padding=0),
        ConvModule(32, num_filters=32, kernel_size=3, stride=1, padding=0),
        ConvModule(32, num_filters=64, kernel_size=3, stride=1, padding=1),
        nn.MaxPool2d(kernel_size=3, stride=2),
        ConvModule(64, num_filters=80, kernel_size=1, stride=1, padding=0),
        ConvModule(80, num_filters=192, kernel_size=3, stride=1, padding=0),
        nn.MaxPool2d(kernel_size=3, stride=2),
    ]
    inception = [
        # Three Type A inception modules
        self.module_a(192, index=0, n=32),
        self.module_a(256, index=1, n=64),
        self.module_a(288, index=2, n=64),
        # One Type B inception module
        self.module_b(288, index=0),
        # Four Type C inception modules
        self.module_c(768, index=0, n=128),
        self.module_c(768, index=1, n=160),
        self.module_c(768, index=2, n=160),
        self.module_c(768, index=3, n=192),
        # One Type D inception module
        self.module_d(768, index=0),
        # Two Type E inception modules
        self.module_e(1280, index=0, pooltype='avg'),
        self.module_e(2048, index=1, pooltype='max'),
    ]
    # Final global pooling before the classifier.
    self.features = nn.Sequential(
        *stem, *inception, nn.AvgPool2d(kernel_size=8, stride=1))
    self.classifier = nn.Sequential(
        nn.Dropout(p=0.2),
        nn.Linear(2048, self.num_classes))
def __init__(self, params):
    """Build GoogleNet (Inception v1): stem convs/LRN, nine inception modules, classifier."""
    Model.check_parameters(
        params,
        {'name': 'GoogleNet', 'input_shape': (3, 224, 224),
         'num_classes': 1000, 'phase': 'training', 'dtype': 'float32'})
    Model.__init__(self, params)
    modules = [
        # Stem: conv 7x7/2, pool, LRN, 1x1 + 3x3 convs, LRN, pool.
        ConvModule(self.input_shape[0], 64, kernel_size=7, stride=2, padding=3),
        nn.MaxPool2d(kernel_size=3, stride=2),
        nn.LocalResponseNorm(size=5, alpha=0.0001, beta=0.75, k=2),
        ConvModule(64, 64, kernel_size=1, stride=1),
        ConvModule(64, 192, kernel_size=3, stride=1, padding=1),
        nn.LocalResponseNorm(size=5, alpha=0.0001, beta=0.75, k=2),
        nn.MaxPool2d(kernel_size=3, stride=2),
        # Inception 3a/3b (out channels 256, 480).
        InceptionModule(192, num_1x1=64, num_3x3red=96, num_3x3=128,
                        num_d5x5red=16, num_d5x5=32, proj=32),
        InceptionModule(256, num_1x1=128, num_3x3red=128, num_3x3=192,
                        num_d5x5red=32, num_d5x5=96, proj=64),
        nn.MaxPool2d(kernel_size=3, stride=2),
        # Inception 4a-4e (out channels 512, 512, 512, 528, 832).
        InceptionModule(480, num_1x1=192, num_3x3red=96, num_3x3=208,
                        num_d5x5red=16, num_d5x5=48, proj=64),
        InceptionModule(512, num_1x1=160, num_3x3red=112, num_3x3=224,
                        num_d5x5red=24, num_d5x5=64, proj=64),
        InceptionModule(512, num_1x1=128, num_3x3red=128, num_3x3=256,
                        num_d5x5red=24, num_d5x5=64, proj=64),
        InceptionModule(512, num_1x1=112, num_3x3red=144, num_3x3=288,
                        num_d5x5red=32, num_d5x5=64, proj=64),
        InceptionModule(528, num_1x1=256, num_3x3red=160, num_3x3=320,
                        num_d5x5red=32, num_d5x5=128, proj=128),
        nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
        # Inception 5a/5b (out channels 832, 1024).
        InceptionModule(832, num_1x1=256, num_3x3red=160, num_3x3=320,
                        num_d5x5red=32, num_d5x5=128, proj=128),
        InceptionModule(832, num_1x1=384, num_3x3red=192, num_3x3=384,
                        num_d5x5red=48, num_d5x5=128, proj=128),
        # Global 7x7 average pooling.
        nn.AvgPool2d(kernel_size=7, stride=1),
    ]
    self.features = nn.Sequential(*modules)
    self.classifier = nn.Sequential(
        nn.Dropout(),
        nn.Linear(1024, self.num_classes))