def __init__(self, rnorm_scale, rnorm_power):
    """Build a three-stage conv stack followed by a single linear classifier.

    Args:
        rnorm_scale: ``alpha`` passed to every LocalResponseNorm layer.
        rnorm_power: ``beta`` passed to every LocalResponseNorm layer.
    """
    super(CNN, self).__init__()
    stack = [
        # Stage 1: conv -> relu -> max-pool -> LRN
        nn.Conv2d(3, 32, kernel_size=5, stride=1, padding=2),
        nn.ReLU(inplace=True),
        nn.MaxPool2d(kernel_size=3, stride=2),
        nn.LocalResponseNorm(size=3, alpha=rnorm_scale, beta=rnorm_power, k=2),
        # Stage 2: conv -> relu -> avg-pool -> LRN
        nn.Conv2d(32, 32, kernel_size=5, stride=1, padding=2),
        nn.ReLU(inplace=True),
        nn.AvgPool2d(kernel_size=3, stride=2),
        nn.LocalResponseNorm(size=3, alpha=rnorm_scale, beta=rnorm_power, k=2),
        # Stage 3: conv -> relu -> avg-pool
        nn.Conv2d(32, 64, kernel_size=5, stride=1, padding=2),
        nn.ReLU(inplace=True),
        nn.AvgPool2d(kernel_size=3, stride=2),
    ]
    self.conv_layers = nn.Sequential(*stack)
    # Flattened conv output (576 features) -> 10 logits.
    self.fc1 = nn.Linear(576, 10)
def __init__(self, num_classes=10):
    """AlexNet-style network for small inputs.

    Args:
        num_classes: number of output logits (default 10).
    """
    super(AlexNet, self).__init__()
    self.features = nn.Sequential(
        nn.Conv2d(3, 96, kernel_size=3, stride=2, padding=1),
        nn.ReLU(inplace=True),
        nn.Conv2d(96, 256, kernel_size=3, padding=1),
        nn.ReLU(inplace=True),
        nn.LocalResponseNorm(5, alpha=0.0001, beta=0.75, k=2),
        nn.MaxPool2d(kernel_size=2),
        nn.Conv2d(256, 384, kernel_size=3, padding=1),
        nn.ReLU(inplace=True),
        nn.LocalResponseNorm(5, alpha=0.0001, beta=0.75, k=2),
        nn.MaxPool2d(kernel_size=2),
        nn.Conv2d(384, 384, kernel_size=3, padding=1),
        nn.ReLU(inplace=True),
        nn.Conv2d(384, 256, kernel_size=3, padding=1),
        nn.ReLU(inplace=True),
        nn.MaxPool2d(kernel_size=2),
    )
    self.classifier = nn.Sequential(
        nn.Dropout(p=0.5),
        # `bias` is a boolean flag; the original passed the int 1, which only
        # worked because it is truthy -- use True explicitly.
        nn.Linear(256 * 2 * 2, 4096, bias=True),
        nn.ReLU(inplace=True),
        nn.Dropout(p=0.5),
        nn.Linear(4096, 4096, bias=True),
        nn.ReLU(inplace=True),
        nn.Linear(4096, num_classes, bias=True),
    )
def __init__(self):
    """Siamese branch: five conv layers followed by a 3-layer head to 2 logits."""
    super(SiameseNetwork, self).__init__()
    conv_stack = [
        nn.Conv2d(1, 96, kernel_size=11, stride=1),
        nn.ReLU(inplace=True),
        nn.LocalResponseNorm(5, alpha=0.0001, beta=0.75, k=2),
        nn.MaxPool2d(3, stride=2),
        nn.Conv2d(96, 256, kernel_size=5, stride=1, padding=2),
        nn.ReLU(inplace=True),
        nn.LocalResponseNorm(5, alpha=0.0001, beta=0.75, k=2),
        nn.MaxPool2d(3, stride=2),
        nn.Dropout2d(p=0.3),
        nn.Conv2d(256, 384, kernel_size=3, stride=1, padding=1),
        nn.ReLU(inplace=True),
        nn.Conv2d(384, 256, kernel_size=3, stride=1, padding=1),
        nn.ReLU(inplace=True),
        nn.MaxPool2d(3, stride=2),
        nn.Dropout2d(p=0.3),
    ]
    self.cnn1 = nn.Sequential(*conv_stack)
    # Fully connected head mapping the flattened feature map to 2 outputs.
    self.fc1 = nn.Sequential(
        nn.Linear(30976, 1024),
        nn.ReLU(inplace=True),
        nn.Dropout2d(p=0.5),
        nn.Linear(1024, 128),
        nn.ReLU(inplace=True),
        nn.Linear(128, 2),
    )
def __init__(self):
    """AlexNet-style network; expected activation shapes are noted per layer."""
    super(AlexNet, self).__init__()
    self.sequential = nn.Sequential(
        nn.Conv2d(3, 64, kernel_size=8, stride=2, padding=2),  # 64, 31, 31
        nn.ReLU(),
        nn.LocalResponseNorm(size=5, alpha=0.0001, beta=0.75, k=2),
        nn.MaxPool2d(kernel_size=3, stride=1),  # 64, 29, 29
        nn.Conv2d(64, 192, kernel_size=5, stride=1, padding=2),  # 192, 29, 29
        nn.ReLU(),
        nn.LocalResponseNorm(size=5, alpha=0.0001, beta=0.75, k=2),
        nn.MaxPool2d(kernel_size=3, stride=2),  # 192, 14, 14
        nn.Conv2d(192, 384, kernel_size=3, padding=1),  # 384, 14, 14
        nn.ReLU(),
        nn.Conv2d(384, 256, kernel_size=3, padding=1),  # 256, 14, 14
        nn.ReLU(),
        nn.Conv2d(256, 256, kernel_size=3, padding=1),  # 256, 14, 14
        nn.ReLU(),
        nn.MaxPool2d(kernel_size=3, stride=2)  # 256, 6, 6
    )
    # NOTE(review): "classfifier" is a typo for "classifier", but it is part of
    # this module's public attribute surface -- renaming would break any code
    # (forward(), saved checkpoints) that references it, so it is kept as-is.
    self.classfifier = nn.Sequential(
        # inplace=True: Dropout overwrites its input tensor in place.
        nn.Dropout(p=0.5, inplace=True),
        nn.Linear(256 * 6 * 6, 4096),
        nn.ReLU(),
        nn.Dropout(p=0.5, inplace=True),
        nn.Linear(4096, 4096),
        nn.ReLU(),
        nn.Linear(4096, 200))
    self.init_bias()
def __init__(self, num_classes=200):
    """Classic AlexNet: five conv layers, then a 3-layer classifier head."""
    super(AlexNet, self).__init__()
    conv_stack = [
        nn.Conv2d(3, 96, kernel_size=11, stride=4, padding=2),
        nn.ReLU(),
        nn.LocalResponseNorm(5),
        nn.MaxPool2d(kernel_size=3, stride=2),
        nn.Conv2d(96, 256, kernel_size=5, padding=2),
        nn.ReLU(),
        nn.LocalResponseNorm(5),
        nn.MaxPool2d(kernel_size=3, stride=2),
        nn.Conv2d(256, 384, kernel_size=3, padding=1),
        nn.ReLU(),
        nn.Conv2d(384, 384, kernel_size=3, padding=1),
        nn.ReLU(),
        nn.Conv2d(384, 256, kernel_size=3, padding=1),
        nn.ReLU(),
        nn.MaxPool2d(kernel_size=3, stride=2),
    ]
    self.features = nn.Sequential(*conv_stack)
    head = [
        nn.Dropout(),
        nn.Linear(256 * 6 * 6, 4096),
        nn.ReLU(),
        nn.Dropout(),
        nn.Linear(4096, 4096),
        nn.ReLU(),
        nn.Linear(4096, num_classes),
    ]
    self.classifier = nn.Sequential(*head)
def __init__(self, num_classes=2):
    """AlexNet-style 2D network with individually named layers.

    Fixes misspelled keyword arguments from the original, each of which
    raised TypeError at construction time:
    ``in_channel``/``out_channel`` -> ``in_channels``/``out_channels``,
    ``stide`` -> ``stride``, and ``in_feature``/``out_feature``/``out_featrue``
    -> ``in_features``/``out_features``.

    Args:
        num_classes: size of the final linear layer's output (default 2).
    """
    super(AlexNet2D, self).__init__()
    self.conv1 = nn.Conv2d(in_channels=3, out_channels=96, kernel_size=11, stride=4)
    self.norm1 = nn.LocalResponseNorm(size=5, alpha=0.0001, beta=0.75, k=2)
    self.pool1 = nn.MaxPool2d(kernel_size=3, stride=2)
    self.conv2 = nn.Conv2d(96, 256, 5, padding=2)
    self.norm2 = nn.LocalResponseNorm(size=5, alpha=0.0001, beta=0.75, k=2)
    self.pool2 = nn.MaxPool2d(kernel_size=3, stride=2)
    self.conv3 = nn.Conv2d(256, 384, 3, padding=1)
    self.conv4 = nn.Conv2d(384, 384, 3, padding=1)
    self.conv5 = nn.Conv2d(384, 256, 3, padding=1)
    self.pool5 = nn.MaxPool2d(kernel_size=3, stride=2)
    self.dropout = nn.Dropout(p=0.5, inplace=True)
    self.fc1 = nn.Linear(in_features=256 * 6 * 6, out_features=4096)
    self.fc2 = nn.Linear(in_features=4096, out_features=4096)
    self.fc3 = nn.Linear(in_features=4096, out_features=num_classes)
    # Shared ReLU module (stateless, so one instance is reused in forward).
    self.relu = nn.ReLU(inplace=True)
def __init__(self):
    """Two conv blocks (conv/relu/pool/LRN) plus two linear layers for CIFAR-10."""
    super(ModelCNNCifar10, self).__init__()
    # Both blocks share the same LRN hyperparameters.
    lrn_args = dict(alpha=0.001 / 9.0, beta=0.75, k=1)
    self.conv1 = nn.Sequential(
        nn.Conv2d(3, 32, kernel_size=5, stride=1, padding=2),
        nn.ReLU(),
        nn.MaxPool2d(kernel_size=2, stride=2),
        nn.LocalResponseNorm(4, **lrn_args),
    )
    self.conv2 = nn.Sequential(
        nn.Conv2d(32, 32, kernel_size=5, stride=1, padding=2),
        nn.ReLU(),
        nn.LocalResponseNorm(4, **lrn_args),  # note: LRN before pooling here
        nn.MaxPool2d(kernel_size=2, stride=2),
    )
    self.fc1 = nn.Linear(32 * 8 * 8, 256)
    self.fc2 = nn.Linear(256, 10)
def __init__(self):
    """GoogLeNet stem plus the a3-e4 Inception stages."""
    super(GoogLeNet, self).__init__()
    stem = [
        nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3),
        nn.ReLU(True),
        nn.MaxPool2d(3, stride=2, ceil_mode=True),
        nn.LocalResponseNorm(5),
        nn.Conv2d(64, 64, 1),
        nn.ReLU(True),
        nn.Conv2d(64, 192, 3, padding=1),
        nn.ReLU(True),
        nn.LocalResponseNorm(5),
        nn.MaxPool2d(3, stride=2, ceil_mode=True),
    ]
    self.pre_layers = nn.Sequential(*stem)
    # Inception argument semantics are defined by the Inception class elsewhere
    # in this file; first argument is the incoming channel count.
    self.a3 = Inception(192, 64, 96, 128, 16, 32, 32)
    self.b3 = Inception(256, 128, 128, 192, 32, 96, 64)
    self.maxpool = nn.MaxPool2d(3, stride=2, ceil_mode=True)
    self.a4 = Inception(480, 192, 96, 208, 16, 48, 64)
    self.b4 = Inception(512, 160, 112, 224, 24, 64, 64)
    self.c4 = Inception(512, 128, 128, 256, 24, 64, 64)
    self.d4 = Inception(512, 112, 144, 288, 32, 64, 64)
    self.e4 = Inception(528, 256, 160, 320, 32, 128, 128)
def __init__(self):
    """GoogLeNet with InceptionV1 blocks and two auxiliary-head helpers.

    Fixes two construction-time defects in the original:
    - the method was misspelled ``__int__`` and therefore never ran as the
      constructor (instances were built with no layers at all);
    - ``nn.LocalResponseNorm()`` was called without the required ``size``
      argument, which raises TypeError; size=5 is used, matching every other
      LRN in this file.
    """
    super(GoogLeNet, self).__init__()
    self.pre_layers = nn.Sequential(
        nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3),
        nn.MaxPool2d(kernel_size=3, stride=2),
        nn.LocalResponseNorm(5),
        nn.Conv2d(64, 64, kernel_size=1, stride=1),
        nn.ReLU(True),
        nn.Conv2d(64, 192, kernel_size=3, padding=1),
        nn.ReLU(True),
        nn.LocalResponseNorm(5),
        nn.MaxPool2d(kernel_size=3, stride=2))
    self.a3 = InceptionV1(192, 64, 96, 128, 16, 32, 32)
    self.b3 = InceptionV1(256, 128, 128, 192, 32, 96, 64)
    self.max_pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
    self.a4 = InceptionV1(480, 192, 96, 208, 16, 48, 64)
    self.b4 = InceptionV1(512, 160, 112, 224, 24, 64, 64)
    self.c4 = InceptionV1(512, 128, 128, 256, 24, 64, 64)
    self.d4 = InceptionV1(512, 112, 144, 288, 32, 64, 64)
    self.e4 = InceptionV1(528, 256, 160, 320, 32, 128, 128)
    self.a5 = InceptionV1(832, 256, 160, 320, 32, 128, 128)
    self.b5 = InceptionV1(832, 384, 192, 384, 48, 128, 128)
    self.avg_pool = nn.AvgPool2d(kernel_size=7, stride=1)
    self.avg_pool5x5 = nn.AvgPool2d(kernel_size=5, stride=3)
    self.fc = nn.Linear(1024, 1000)
    self.fc_128 = nn.Linear(128, 1024)
    self.soft_max = nn.Softmax2d()
    # 1x1 projections used by the auxiliary classifier branches.
    self.conv1x1 = nn.Conv2d(512, 128, kernel_size=1, stride=1)
    self.conv1x1_2 = nn.Conv2d(528, 128, kernel_size=1, stride=1)
    self.dropout_4 = nn.Dropout(p=0.4)
    self.dropout_7 = nn.Dropout(p=0.7)
    self.relu = nn.ReLU()
def __init__(self):
    """AlexNet-style encoder (conv1-conv5); weights are then loaded via
    init_with_pretrained()."""
    super(Encoder, self).__init__()
    self.conv1 = nn.Sequential(
        nn.Conv2d(3, 96, kernel_size=11, stride=4, padding=0),
        nn.ReLU(),
        nn.MaxPool2d(kernel_size=3, stride=2, padding=0),
        nn.LocalResponseNorm(size=5, alpha=0.0001, beta=0.75),
    )
    self.conv2 = nn.Sequential(
        nn.Conv2d(96, 256, kernel_size=5, stride=1, padding=2, groups=2),
        nn.ReLU(),
        nn.MaxPool2d(kernel_size=3, stride=2, padding=0),
        nn.LocalResponseNorm(size=5, alpha=0.0001, beta=0.75),
    )
    self.conv3 = nn.Sequential(
        nn.Conv2d(256, 384, kernel_size=3, stride=1, padding=1, groups=1),
        nn.ReLU(),
    )
    self.conv4 = nn.Sequential(
        nn.Conv2d(384, 384, kernel_size=3, stride=1, padding=1, groups=2),
        nn.ReLU(),
    )
    self.conv5 = nn.Sequential(
        nn.Conv2d(384, 256, kernel_size=3, stride=1, padding=1, groups=2),
        nn.ReLU(),
        nn.MaxPool2d(kernel_size=3, stride=2, padding=0),
    )
    self.init_with_pretrained()
def __init__(self, n_classes=21):
    """Fully convolutional AlexNet: conv features, a conv-only "classifier",
    and a transposed-conv upsampler producing n_classes score maps."""
    super(fcalexnet, self).__init__()
    feats = [
        nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
        nn.ReLU(inplace=True),
        nn.MaxPool2d(kernel_size=3, stride=2),
        nn.LocalResponseNorm(size=5, alpha=1e-4, beta=0.75, k=1),
        nn.Conv2d(64, 192, kernel_size=5, padding=2),
        nn.ReLU(inplace=True),
        nn.MaxPool2d(kernel_size=3, stride=2),
        nn.LocalResponseNorm(size=5, alpha=1e-4, beta=0.75, k=1),
        nn.Conv2d(192, 384, kernel_size=3, padding=1),
        nn.ReLU(inplace=True),
        nn.Conv2d(384, 256, kernel_size=3, padding=1),
        nn.ReLU(inplace=True),
        nn.Conv2d(256, 256, kernel_size=3, padding=1),
        nn.ReLU(inplace=True),
        nn.MaxPool2d(kernel_size=3, stride=2),
    ]
    self.features = nn.Sequential(*feats)
    # 6x6 and 1x1 convolutions play the role of AlexNet's FC layers.
    head = [
        nn.Conv2d(256, 4096, kernel_size=6),
        nn.ReLU(inplace=True),
        nn.Dropout(),
        nn.Conv2d(4096, 4096, kernel_size=1),
        nn.ReLU(inplace=True),
        nn.Dropout(),
        nn.Conv2d(4096, n_classes, kernel_size=1),
    ]
    self.classifier = nn.Sequential(*head)
    self.deconv = nn.ConvTranspose2d(n_classes, n_classes, kernel_size=63, stride=32, bias=False)
    self.min_size = 224
def __init__(self):
    """Saliency net: AlexNet-style conv1-conv5 plus a 1x1 reduction to a
    single-channel map."""
    super(saliency, self).__init__()
    layers = []
    # layer 1
    layers += [nn.Conv2d(3, 96, kernel_size=11, stride=4, padding=2),
               nn.ReLU(),
               nn.MaxPool2d(kernel_size=3, stride=2),
               nn.LocalResponseNorm(5)]
    # layer 2
    layers += [nn.Conv2d(96, 256, kernel_size=5, stride=1, padding=2, groups=2),
               nn.ReLU(),
               nn.MaxPool2d(kernel_size=3, stride=2),
               nn.LocalResponseNorm(5)]
    # layer 3
    layers += [nn.Conv2d(256, 384, kernel_size=3, stride=1, padding=1), nn.ReLU()]
    # layer 4
    layers += [nn.Conv2d(384, 384, kernel_size=3, stride=1, padding=1, groups=2), nn.ReLU()]
    # layer 5
    layers += [nn.Conv2d(384, 256, kernel_size=3, stride=1, padding=1, groups=2), nn.ReLU()]
    # layer 5 reduction: 1x1 conv down to one channel (13x13x1 output)
    layers += [nn.Conv2d(256, 1, kernel_size=1, stride=1), nn.ReLU()]
    self.sal_conv = nn.Sequential(*layers)
def __init__(self, use_dropout):
    """Five conv layers with LRN (k = log of the channel count) and optional
    dropout, ending in a single 10-way linear layer.

    Args:
        use_dropout: stored on the instance; presumably consulted in forward()
            to decide whether the dropout layers are applied -- confirm there.
    """
    super(BigNet, self).__init__()
    # alpha and beta as in the ImageNet (AlexNet) paper; k = log(channels).
    lrn_args = dict(size=3, alpha=0.0001, beta=0.75)
    self.conv1 = nn.Conv2d(3, 128, 5, 1, padding=2)
    self.pool1 = nn.MaxPool2d(3, 2)
    self.LRN1 = nn.LocalResponseNorm(k=math.log(128), **lrn_args)
    self.conv2 = nn.Conv2d(128, 128, 3, 1, padding=1)
    self.use_dropout = use_dropout
    self.dropout1 = nn.Dropout(0.7)
    self.conv3 = nn.Conv2d(128, 256, 3, 1, padding=1)
    self.LRN2 = nn.LocalResponseNorm(k=math.log(256), **lrn_args)
    self.dropout2 = nn.Dropout(0.6)
    self.conv4 = nn.Conv2d(256, 512, 3, 1, padding=1)
    self.dropout3 = nn.Dropout(0.5)
    self.conv5 = nn.Conv2d(512, 1024, 3, 1, padding=1)
    self.pool2 = nn.MaxPool2d(3, 2)
    self.LRN3 = nn.LocalResponseNorm(k=math.log(1024), **lrn_args)
    self.dropout4 = nn.Dropout(0.4)
    self.fc1 = nn.Linear(1024 * 3 * 3, 10)
def __init__(self, bit=64, num_classes=1000):
    """AlexNet backbone with an extra hashing head.

    Args:
        bit: width of the final hash projection.
        num_classes: size of the classifier output feeding the hash head.
    """
    super(AlexNet, self).__init__()
    self.module_name = "Alexnet"
    backbone = [
        nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
        nn.ReLU(inplace=True),
        nn.MaxPool2d(kernel_size=3, stride=2),
        nn.LocalResponseNorm(size=2, k=1),
        nn.Conv2d(64, 192, kernel_size=5, padding=2),
        nn.ReLU(inplace=True),
        nn.MaxPool2d(kernel_size=3, stride=2),
        nn.LocalResponseNorm(size=2, k=1),
        nn.Conv2d(192, 384, kernel_size=3, padding=1, groups=2),
        nn.ReLU(inplace=True),
        nn.Conv2d(384, 256, kernel_size=3, padding=1, groups=2),
        nn.ReLU(inplace=True),
        nn.Conv2d(256, 256, kernel_size=3, padding=1, groups=2),
        nn.ReLU(inplace=True),
        nn.MaxPool2d(kernel_size=3, stride=2),
    ]
    self.features = nn.Sequential(*backbone)
    self.avgpool = nn.AdaptiveAvgPool2d((6, 6))
    self.classifier = nn.Sequential(
        nn.Dropout(),
        nn.Linear(256 * 6 * 6, 4096),
        nn.ReLU(inplace=True),
        nn.Dropout(),
        nn.Linear(4096, 4096),
        nn.ReLU(inplace=True),
        nn.Linear(4096, num_classes),
    )
    # Hash head: ReLU over the class logits, then project to `bit` dims.
    self.hash = nn.Sequential(nn.ReLU(inplace=True), nn.Linear(num_classes, bit))
def __init__(self):
    """Three identical conv/relu/pool/LRN blocks, then a 3-layer MLP to 6 outputs."""
    super(HashingNet, self).__init__()
    blocks = []
    # (in_channels, out_channels, kernel_size, padding) per conv block.
    for c_in, c_out, ksize, pad in ((1, 96, 5, 2), (96, 128, 3, 1), (128, 256, 3, 1)):
        blocks += [
            nn.Conv2d(c_in, c_out, ksize, padding=pad),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(4, stride=4),
            nn.LocalResponseNorm(2),
        ]
    self.nn1 = nn.Sequential(*blocks)
    self.nn2 = nn.Sequential(
        nn.Linear(4096, 1024),
        nn.ReLU(inplace=True),
        nn.Linear(1024, 512),
        nn.ReLU(inplace=True),
        nn.Linear(512, 6),
    )
def __init__(self, Nj):
    """AlexNet-style regressor emitting Nj * 2 values (presumably x/y pairs
    per keypoint -- confirm against the caller)."""
    super(AlexNet, self).__init__()
    self.Nj = Nj
    feats = [
        nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
        nn.ReLU(inplace=True),
        nn.LocalResponseNorm(size=5, alpha=0.0001, beta=0.75, k=2),
        nn.MaxPool2d(kernel_size=3, stride=2),
        nn.Conv2d(64, 192, kernel_size=5, padding=2),
        nn.ReLU(inplace=True),
        nn.LocalResponseNorm(size=5, alpha=0.0001, beta=0.75, k=2),
        nn.MaxPool2d(kernel_size=3, stride=2),
        nn.Conv2d(192, 384, kernel_size=3, padding=1),
        nn.ReLU(inplace=True),
        nn.Conv2d(384, 256, kernel_size=3, padding=1),
        nn.ReLU(inplace=True),
        nn.Conv2d(256, 256, kernel_size=3, padding=1),
        nn.ReLU(inplace=True),
        nn.MaxPool2d(kernel_size=3, stride=2),
    ]
    self.features = nn.Sequential(*feats)
    self.avgpool = nn.AdaptiveAvgPool2d((6, 6))
    self.classifier = nn.Sequential(
        nn.Dropout(p=0.5),
        nn.Linear(256 * 6 * 6, 4096),
        nn.ReLU(inplace=True),
        nn.Dropout(p=0.5),
        nn.Linear(4096, 4096),
        nn.ReLU(inplace=True),
        nn.Linear(4096, Nj * 2),
    )
def __init__(self, num_classes=1000):
    """Caffe-flavoured AlexNet (grouped convs, LRN after conv1 and conv2)."""
    super(AlexNetNN, self).__init__()
    feats = []
    # conv 1
    feats += [nn.Conv2d(3, 96, kernel_size=11, stride=4, padding=0),
              nn.ReLU(inplace=True),
              nn.LocalResponseNorm(5, alpha=0.0001, beta=0.75),
              nn.MaxPool2d(kernel_size=3, stride=2)]
    # conv 2
    feats += [nn.Conv2d(96, 256, kernel_size=5, padding=2, groups=2),
              nn.ReLU(inplace=True),
              nn.LocalResponseNorm(5, alpha=0.0001, beta=0.75),
              nn.MaxPool2d(kernel_size=3, stride=2)]
    # conv 3
    feats += [nn.Conv2d(256, 384, kernel_size=3, padding=1),
              nn.ReLU(inplace=True)]
    # conv 4
    feats += [nn.Conv2d(384, 384, kernel_size=3, padding=1, groups=2),
              nn.ReLU(inplace=True)]
    # conv 5
    feats += [nn.Conv2d(384, 256, kernel_size=3, padding=1, groups=2),
              nn.ReLU(inplace=True),
              nn.MaxPool2d(kernel_size=3, stride=2)]
    self.features = nn.Sequential(*feats)
    self.classifier = nn.Sequential(
        nn.Linear(256 * 6 * 6, 4096),
        nn.ReLU(inplace=True),
        nn.Dropout(),
        nn.Linear(4096, 4096),
        nn.ReLU(inplace=True),
        nn.Dropout(),
        nn.Linear(4096, num_classes),
    )
def __init__(self, num_classes: int = 10) -> None:
    """AlexNet with adaptive average pooling so the head always sees 6x6 maps.

    Args:
        num_classes: number of output logits (default 10).
    """
    super().__init__()
    self.num_classes = num_classes
    feats = [
        nn.Conv2d(3, 96, kernel_size=11, stride=4, padding=2),
        nn.ReLU(inplace=True),
        nn.LocalResponseNorm(size=5, alpha=0.0001, beta=0.75, k=2),
        nn.MaxPool2d(kernel_size=3, stride=2),
        nn.Conv2d(96, 256, kernel_size=5, padding=2),
        nn.ReLU(inplace=True),
        nn.LocalResponseNorm(size=5, alpha=0.0001, beta=0.75, k=2),
        nn.MaxPool2d(kernel_size=3, stride=2),
        nn.Conv2d(256, 384, kernel_size=3, padding=1),
        nn.ReLU(inplace=True),
        nn.Conv2d(384, 384, kernel_size=3, padding=1),
        nn.ReLU(inplace=True),
        nn.Conv2d(384, 256, kernel_size=3, padding=1),
        nn.ReLU(inplace=True),
        nn.MaxPool2d(kernel_size=3, stride=2),
    ]
    self.features = nn.Sequential(*feats)
    self.avgpool = nn.AdaptiveAvgPool2d((6, 6))
    head = [
        nn.Dropout(),
        nn.Linear(256 * 6 * 6, 4096),
        nn.ReLU(inplace=True),
        nn.Dropout(),
        nn.Linear(4096, 4096),
        nn.ReLU(inplace=True),
        nn.Linear(4096, num_classes),
    ]
    self.classifier = nn.Sequential(*head)
def __init__(self, n_cls=100, useLRN=True, useDropOut=True):
    """AlexNet variant with switchable LRN / dropout (nn.Identity when off).

    Args:
        n_cls: number of output classes.
        useLRN: insert LocalResponseNorm layers when True.
        useDropOut: insert Dropout layers in the classifier when True.
    """
    super().__init__()

    def lrn():
        # Fresh LRN module when enabled, otherwise a pass-through.
        return nn.LocalResponseNorm(size=5, k=2) if useLRN else nn.Identity()

    def drop():
        return nn.Dropout() if useDropOut else nn.Identity()

    self.feature = nn.Sequential(
        nn.Conv2d(3, 96, (11, 11), stride=4, padding=2),   # (224-11+4)/4 + 1 = 55
        nn.ReLU(),
        nn.MaxPool2d((3, 3), 2),                           # (55-3)/2 + 1 = 27
        lrn(),
        nn.Conv2d(96, 256, (5, 5), stride=1, padding=2),   # (27-5+4)/1 + 1 = 27
        nn.ReLU(),
        nn.MaxPool2d((3, 3), stride=2),                    # (27-3)/2 + 1 = 13
        lrn(),
        nn.Conv2d(256, 384, (3, 3), stride=1, padding=1),  # (13-3+2)/1 + 1 = 13
        nn.ReLU(),
        nn.Conv2d(384, 384, (3, 3), stride=1, padding=1),  # 13
        nn.ReLU(),
        nn.Conv2d(384, 256, (3, 3), stride=1, padding=1),  # 13
        nn.ReLU(),
        nn.MaxPool2d((3, 3), stride=2),                    # (13-3)/2 + 1 = 6
    )
    self.classifier = nn.Sequential(
        drop(),
        nn.Linear(6 * 6 * 256, 4096),
        nn.ReLU(),
        drop(),
        nn.Linear(4096, 4096),
        nn.ReLU(),
        nn.Linear(4096, n_cls),
    )
    self.loss_func = nn.CrossEntropyLoss()
def __init__(self, in_channels, n_classes):
    """Lee et al. network for hyperspectral input.

    The first stage is an inception-style pair of 3D convolutions (1x1xB and
    3x3xB, where B = in_channels spectral bands); the rest is 1x1 2D convs
    arranged as two residual blocks plus an AlexNet-like conv "FC" stage.
    """
    super(LeeEtAl, self).__init__()
    self.name = 'LeeEtAl'
    # Inception-style spectral convolutions over the full band axis.
    self.conv_3x3 = nn.Conv3d(1, 128, (3, 3, in_channels),
                              stride=(1, 1, 2), padding=(1, 1, 0))
    self.conv_1x1 = nn.Conv3d(1, 128, (1, 1, in_channels),
                              stride=(1, 1, 1), padding=0)
    # Residual block 1
    self.conv1 = nn.Conv2d(256, 128, (1, 1))
    self.conv2 = nn.Conv2d(128, 128, (1, 1))
    self.conv3 = nn.Conv2d(128, 128, (1, 1))
    # Residual block 2
    self.conv4 = nn.Conv2d(128, 128, (1, 1))
    self.conv5 = nn.Conv2d(128, 128, (1, 1))
    # Last three convs mirror AlexNet's fully connected stage.
    self.conv6 = nn.Conv2d(128, 128, (1, 1))
    self.conv7 = nn.Conv2d(128, 128, (1, 1))
    self.conv8 = nn.Conv2d(128, n_classes, (9, 9))
    self.lrn1 = nn.LocalResponseNorm(256)
    self.lrn2 = nn.LocalResponseNorm(128)
    # Dropout applied around the 7th and 8th conv layers during training.
    self.dropout = nn.Dropout(p=0.5)
    self.apply(self.weight_init)
def __init__(self, num_classes=10):
    """AlexNet-style network for single-channel input.

    Fixes two defects in the original:
    - conv1 produced 32 channels while conv2 expected 96, a guaranteed
      runtime shape error; conv1 now outputs 96, matching the per-layer
      shape comments that were already present;
    - the final Linear used a module-level ``NUM_CLASSES`` constant while the
      ``num_classes`` parameter was silently ignored; the parameter is now
      honoured (default 10 preserves the declared interface).

    Args:
        num_classes: number of output logits.
    """
    super().__init__()
    self.net = nn.Sequential(
        nn.Conv2d(in_channels=1, out_channels=96, kernel_size=3, stride=1),
        nn.ReLU(),
        nn.LocalResponseNorm(size=5, alpha=0.0001, beta=0.75, k=2),  # AlexNet section 3.3
        nn.MaxPool2d(kernel_size=3, stride=2),
        nn.Conv2d(96, 256, 5, padding=2),
        nn.ReLU(),
        nn.LocalResponseNorm(size=5, alpha=0.0001, beta=0.75, k=2),
        nn.MaxPool2d(kernel_size=3, stride=2),
        nn.Conv2d(256, 384, 3, padding=1),
        nn.ReLU(),
        nn.Conv2d(384, 384, 3, padding=1),
        nn.ReLU(),
        nn.Conv2d(384, 256, 3, padding=1),
        nn.ReLU(),
        nn.MaxPool2d(kernel_size=3, stride=2),
    )
    # classifier is just a name for the linear layers
    self.classifier = nn.Sequential(
        nn.Dropout(p=0.5, inplace=True),
        nn.Linear(in_features=(256 * 6 * 6), out_features=4096),
        nn.ReLU(),
        nn.Dropout(p=0.5, inplace=True),
        nn.Linear(in_features=4096, out_features=4096),
        nn.ReLU(),
        nn.Linear(in_features=4096, out_features=num_classes),
    )
    self.init_bias()  # initialize bias
def __init__(self):
    """Temporal-stream ConvNet: five conv layers and a 51-way linear head."""
    super(TemporalStreamConvNet, self).__init__()
    feats = [
        nn.Conv2d(3, 96, kernel_size=7, stride=2),
        nn.ReLU(),
        nn.MaxPool2d(3, stride=2),
        nn.LocalResponseNorm(2),
        nn.Conv2d(96, 256, kernel_size=5, stride=2),
        nn.ReLU(),
        nn.MaxPool2d(3, stride=2),
        nn.LocalResponseNorm(2),
        nn.Conv2d(256, 512, kernel_size=3),
        nn.ReLU(),
        nn.Conv2d(512, 512, kernel_size=3),
        nn.ReLU(),
        nn.Conv2d(512, 512, kernel_size=3),
        nn.ReLU(),
        nn.MaxPool2d(3, stride=2),
    ]
    self.features = nn.Sequential(*feats)
    # NOTE(review): the head has Dropout between Linear layers but no
    # nonlinearity -- looks intentional in the original, kept as-is.
    self.classifier = nn.Sequential(
        nn.Linear(2048, 4096),
        nn.Dropout(),
        nn.Linear(4096, 2048),
        nn.Dropout(),
        nn.Linear(2048, 51),
    )
def __init__(self):
    """Five conv stages, adaptive pooling, and two FC stages ending in a
    128-dim output; a Sigmoid module is kept as an attribute for forward()."""
    super(ConvNet, self).__init__()
    # Stage 1: conv + LRN + pool.
    self.layer1 = nn.Sequential(
        nn.Conv2d(1, 96, kernel_size=11, stride=1),
        nn.ReLU(),
        nn.LocalResponseNorm(5, alpha=1e-4, beta=0.75, k=2),
        nn.MaxPool2d(kernel_size=3, stride=2),
    )
    # Stage 2: conv + LRN + pool + spatial dropout.
    self.layer2 = nn.Sequential(
        nn.Conv2d(96, 256, kernel_size=5, stride=1, padding=2),
        nn.ReLU(),
        nn.LocalResponseNorm(5, alpha=1e-4, beta=0.75, k=2),
        nn.MaxPool2d(kernel_size=3, stride=2),
        nn.Dropout2d(p=0.3),
    )
    # Stages 3-5: plain convs (no activation modules in the original).
    self.layer3 = nn.Sequential(
        nn.Conv2d(256, 384, kernel_size=3, stride=1, padding=1),
    )
    self.layer4 = nn.Sequential(
        nn.Conv2d(384, 256, kernel_size=3, stride=1, padding=1),
        nn.MaxPool2d(kernel_size=3, stride=2),
        nn.Dropout2d(p=0.3),
    )
    self.layer5 = nn.Sequential(
        nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1),
        nn.MaxPool2d(kernel_size=3, stride=2),
        nn.Dropout2d(p=0.3),
    )
    self.adap = nn.AdaptiveAvgPool3d((128, 6, 6))
    self.layer6 = nn.Sequential(
        nn.Linear(4608, 512),
        nn.ReLU(),
        nn.Dropout(p=0.5),
    )
    self.layer7 = nn.Sequential(
        nn.Linear(512, 128),
        nn.ReLU(),
    )
    self.sigmoid = nn.Sigmoid()
def __init__(self, nc=1):
    """Two identical input branches (W1/W2), a shared conv trunk, and a
    dropout + linear + sigmoid head producing one score.

    Args:
        nc: number of input channels for each branch (default 1).
    """
    super(LBNet_1, self).__init__()

    def make_branch():
        # Both input branches have identical architecture (separate weights).
        return nn.Sequential(
            nn.Conv2d(nc, 16, kernel_size=7, stride=1),
            nn.ReLU(),
            nn.LocalResponseNorm(5, 0.0001, 0.75, 2),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )

    self.W1 = make_branch()
    self.W2 = make_branch()
    self.convolutions = nn.Sequential(
        nn.Conv2d(16, 64, kernel_size=7, stride=1),
        nn.ReLU(),
        nn.LocalResponseNorm(5, 0.0001, 0.75, 2),
        nn.MaxPool2d(kernel_size=2, stride=2),
        nn.Conv2d(64, 256, kernel_size=7, stride=1),
    )
    self.mlp = nn.Sequential(
        nn.Dropout(0.5),
        nn.Linear(21 * 21 * 256, 1),
        nn.Sigmoid(),
    )
def __init__(self):
    """AlexNet-style backbone with a 128-dim bottleneck before the label head."""
    super(Model, self).__init__()
    self.conv1 = nn.Conv2d(3, 96, kernel_size=11, stride=4)
    self.relu1 = nn.ReLU(inplace=True)
    self.pool1 = nn.MaxPool2d(kernel_size=3, stride=2)
    self.lrn_norm1 = nn.LocalResponseNorm(5)
    self.conv2 = nn.Conv2d(96, 256, kernel_size=5, stride=1, padding=2)
    self.relu2 = nn.ReLU(inplace=True)
    self.pool2 = nn.MaxPool2d(kernel_size=3, stride=2)
    self.lrn_norm2 = nn.LocalResponseNorm(5)
    self.conv3 = nn.Conv2d(256, 384, kernel_size=3, stride=1, padding=1)
    self.relu3 = nn.ReLU(inplace=True)
    self.conv4 = nn.Conv2d(384, 384, kernel_size=3, stride=1, padding=1, groups=2)
    self.relu4 = nn.ReLU(inplace=True)
    self.conv5 = nn.Conv2d(384, 256, kernel_size=3, stride=1, padding=1, groups=2)
    self.relu5 = nn.ReLU(inplace=True)
    self.pool5 = nn.MaxPool2d(kernel_size=3, stride=2)
    self.flatten = Flatten()
    # Flattened feature size feeding the first FC layer.
    flat_dim = 2304
    self.fc6 = nn.Linear(flat_dim, 4096)
    self.relu6 = nn.ReLU(inplace=True)
    self.drop6 = nn.Dropout(0.5)
    self.fc7 = nn.Linear(4096, 4096)
    self.relu7 = nn.ReLU(inplace=True)
    self.drop7 = nn.Dropout(0.5)
    self.fc8 = nn.Linear(4096, 128)
    self.fc9 = nn.Linear(128, LABEL_NUM)
def __init__(self, output):
    """AlexNet features plus a deep regression head squashed by Sigmoid.

    Args:
        output: number of regression outputs.
    """
    super(DDAlexNet, self).__init__()
    feats = [
        nn.Conv2d(3, 96, kernel_size=11, stride=4, padding=2),
        nn.ReLU(inplace=True),
        nn.MaxPool2d(kernel_size=3, stride=2),
        nn.LocalResponseNorm(5, alpha=0.0001, beta=0.75),
        nn.Conv2d(96, 256, kernel_size=5, padding=2),
        nn.ReLU(inplace=True),
        nn.MaxPool2d(kernel_size=3, stride=2),
        nn.LocalResponseNorm(5, alpha=0.0001, beta=0.75),
        nn.Conv2d(256, 384, kernel_size=3, padding=1),
        nn.ReLU(inplace=True),
        nn.Conv2d(384, 256, kernel_size=3, padding=1),
        nn.ReLU(inplace=True),
        nn.Conv2d(256, 256, kernel_size=3, padding=1),
        nn.ReLU(inplace=True),
        nn.MaxPool2d(kernel_size=3, stride=2),
    ]
    self.features = nn.Sequential(*feats)
    # Four-layer regression head; final Sigmoid bounds outputs to (0, 1).
    head = [
        nn.Linear(256 * 5 * 7, 4096),
        nn.ReLU(inplace=True),
        nn.Dropout(),
        nn.Linear(4096, 4096),
        nn.ReLU(inplace=True),
        nn.Dropout(),
        nn.Linear(4096, 256),
        nn.ReLU(inplace=True),
        nn.Dropout(),
        nn.Linear(256, output),
        nn.Sigmoid(),
    ]
    self.regression = nn.Sequential(*head)
def __init__(self, n_classes=100, dropout=True):
    """Caffe-style AlexNet with explicitly named submodules.

    The submodule names (conv1, relu1, ...) are preserved exactly so that
    state_dict keys stay compatible with existing checkpoints.

    Args:
        n_classes: output size of the class classifier.
        dropout: when False, dropout slots are filled with Id() pass-throughs.
    """
    super(AlexNetCaffe, self).__init__()
    feat = OrderedDict()
    feat["conv1"] = nn.Conv2d(3, 96, kernel_size=11, stride=4)
    feat["relu1"] = nn.ReLU(inplace=True)
    feat["pool1"] = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)
    feat["norm1"] = nn.LocalResponseNorm(5, 1.e-4, 0.75)
    feat["conv2"] = nn.Conv2d(96, 256, kernel_size=5, padding=2, groups=2)
    feat["relu2"] = nn.ReLU(inplace=True)
    feat["pool2"] = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)
    feat["norm2"] = nn.LocalResponseNorm(5, 1.e-4, 0.75)
    feat["conv3"] = nn.Conv2d(256, 384, kernel_size=3, padding=1)
    feat["relu3"] = nn.ReLU(inplace=True)
    feat["conv4"] = nn.Conv2d(384, 384, kernel_size=3, padding=1, groups=2)
    feat["relu4"] = nn.ReLU(inplace=True)
    feat["conv5"] = nn.Conv2d(384, 256, kernel_size=3, padding=1, groups=2)
    feat["relu5"] = nn.ReLU(inplace=True)
    feat["pool5"] = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)
    self.features = nn.Sequential(feat)
    head = OrderedDict()
    head["fc6"] = nn.Linear(256 * 6 * 6, 4096)
    head["relu6"] = nn.ReLU(inplace=True)
    head["drop6"] = nn.Dropout() if dropout else Id()
    head["fc7"] = nn.Linear(4096, 4096)
    head["relu7"] = nn.ReLU(inplace=True)
    head["drop7"] = nn.Dropout() if dropout else Id()
    self.classifier = nn.Sequential(head)
    self.class_classifier = nn.Linear(4096, n_classes)
def __init__(self):
    """Caffe-style AlexNet feature extractor with explicitly named submodules.

    Names (conv1, relu1, ...) are preserved exactly so state_dict keys stay
    compatible with existing checkpoints.
    """
    super(features, self).__init__()
    feat = OrderedDict()
    feat["conv1"] = nn.Conv2d(3, 96, kernel_size=11, stride=4)
    feat["relu1"] = nn.ReLU(inplace=True)
    feat["pool1"] = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)
    feat["norm1"] = nn.LocalResponseNorm(5, 1.e-4, 0.75)
    feat["conv2"] = nn.Conv2d(96, 256, kernel_size=5, padding=2, groups=2)
    feat["relu2"] = nn.ReLU(inplace=True)
    feat["pool2"] = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)
    feat["norm2"] = nn.LocalResponseNorm(5, 1.e-4, 0.75)
    feat["conv3"] = nn.Conv2d(256, 384, kernel_size=3, padding=1)
    feat["relu3"] = nn.ReLU(inplace=True)
    feat["conv4"] = nn.Conv2d(384, 384, kernel_size=3, padding=1, groups=2)
    feat["relu4"] = nn.ReLU(inplace=True)
    feat["conv5"] = nn.Conv2d(384, 256, kernel_size=3, padding=1, groups=2)
    feat["relu5"] = nn.ReLU(inplace=True)
    feat["pool5"] = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)
    self.features = nn.Sequential(feat)
def __init__(self, num_classes=1000):
    """Original-flavoured AlexNet (96-channel conv1, grouped convs, LRN)."""
    super(AlexNet, self).__init__()
    feats = [
        nn.Conv2d(3, 96, kernel_size=11, stride=4),
        nn.ReLU(inplace=True),
        nn.MaxPool2d(kernel_size=3, stride=2),
        nn.LocalResponseNorm(size=5, alpha=1e-04, beta=0.75, k=1),
        nn.Conv2d(96, 256, kernel_size=5, padding=2, groups=2),
        nn.ReLU(inplace=True),
        nn.MaxPool2d(kernel_size=3, stride=2),
        nn.LocalResponseNorm(size=5, alpha=1e-04, beta=0.75, k=1),
        nn.Conv2d(256, 384, kernel_size=3, padding=1),
        nn.ReLU(inplace=True),
        nn.Conv2d(384, 384, kernel_size=3, padding=1, groups=2),
        nn.ReLU(inplace=True),
        nn.Conv2d(384, 256, kernel_size=3, padding=1, groups=2),
        nn.ReLU(inplace=True),
        nn.MaxPool2d(kernel_size=3, stride=2),
    ]
    self.features = nn.Sequential(*feats)
    # Indices of the head modules (0..6) are relied on elsewhere per the
    # original's numbering comments -- order must not change.
    head = [
        nn.Linear(256 * 6 * 6, 4096),  # 0
        nn.ReLU(inplace=True),         # 1
        nn.Dropout(0.5),               # 2
        nn.Linear(4096, 4096),         # 3
        nn.ReLU(inplace=True),         # 4
        nn.Dropout(0.5),               # 5
        nn.Linear(4096, num_classes),  # 6
    ]
    self.classifier = nn.Sequential(*head)
def _make_layers(self, in_channels, pool_type):
    """Assemble the conv/pool/LRN stem and four InceptionA stages.

    Args:
        in_channels: channel count of the incoming tensor.
        pool_type: "max" or "avg"; any other value omits the pooling layers
            entirely (matching the original's elif chain).

    Returns:
        nn.Sequential containing the assembled layers.
    """
    def pool(kernel_size, stride=None):
        # stride=None lets PyTorch default the stride to the kernel size,
        # exactly as when the argument is omitted.
        if pool_type == "max":
            return [nn.MaxPool2d(kernel_size=kernel_size, stride=stride)]
        if pool_type == "avg":
            return [nn.AvgPool2d(kernel_size=kernel_size, stride=stride)]
        return []

    layers = [BasicConv2d(in_channels, 64, kernel_size=3, padding=1)]
    layers += pool(3, 2)
    layers.append(nn.LocalResponseNorm(size=5))
    layers.append(BasicConv2d(64, 128, kernel_size=1))
    layers.append(BasicConv2d(128, 256, kernel_size=3, padding=1))
    layers.append(nn.LocalResponseNorm(size=5))
    layers += pool(3, 2)
    layers.append(InceptionA(256, pool_features=32))   # 224 + 32
    layers.append(InceptionA(256, pool_features=64))   # 224 + 64
    layers.append(InceptionA(288, pool_features=128))  # 224 + 128
    layers.append(InceptionA(352, pool_features=256))  # 224 + 256
    layers += pool(3)
    return nn.Sequential(*layers)