예제 #1
0
def mobilenet_v2(num_classes):
    """Build an ImageNet-pretrained MobileNetV2 with a fresh classification head.

    Args:
        num_classes: number of output classes for the replacement head.

    Returns:
        The torchvision MobileNetV2 model with its classifier re-initialized
        via the project's ``init_weight`` helper.
    """
    model = torchvision.models.mobilenet_v2(pretrained=True)
    # The classifier receives flattened (N, 1280) features, so plain
    # nn.Dropout is correct here; nn.Dropout2d expects 4D input and would
    # zero entire channels (and warns on non-4D input in recent PyTorch).
    model.classifier = nn.Sequential(
        nn.Dropout(p=0.3, inplace=False),
        nn.Linear(in_features=1280, out_features=num_classes, bias=True))
    init_weight(model.classifier)
    return model
예제 #2
0
 def __init__(self, num_classes):
     """Assemble the network: four down-sampling blocks, a 1x1 classifier
     conv, and global average pooling, all wrapped in one nn.Sequential.

     Args:
         num_classes: number of output channels of the final 1x1 conv.
     """
     super(TinyNet, self).__init__()
     stages = [
         down_sam_blk(3, 16),
         down_sam_blk(16, 32),
         down_sam_blk(32, 64),
         down_sam_blk(64, 128),
         # 1x1 conv maps 128 features to per-class scores.
         nn.Conv2d(128, num_classes, 1, 1, 0),
         nn.AdaptiveAvgPool2d(1),
     ]
     self.model = nn.Sequential(*stages)
     init_weight(self.model)
예제 #3
0
def resnet18(num_classes):
    """Build an ImageNet-pretrained ResNet-18 with a fresh classification head.

    Args:
        num_classes: number of output classes for the replacement head.

    Returns:
        The torchvision ResNet-18 model with its ``fc`` head re-initialized
        via the project's ``init_weight`` helper.
    """
    # Keyword form for consistency with the other model factories here.
    model = torchvision.models.resnet18(pretrained=True)
    # ResNet's fc receives flattened (N, 512) features, so plain nn.Dropout
    # is correct; nn.Dropout2d expects 4D input and would zero entire
    # channels (and warns on non-4D input in recent PyTorch).
    model.fc = nn.Sequential(
        nn.Dropout(0.3),
        nn.Linear(in_features=512, out_features=num_classes, bias=True))
    init_weight(model.fc)
    return model
예제 #4
0
def squeezenet1_1(num_classes):
    """Build an ImageNet-pretrained SqueezeNet 1.1 with a replaced classifier.

    The head mirrors torchvision's stock layout (dropout, 1x1 conv scoring
    layer, ReLU, global average pool) but is sized for ``num_classes`` and
    re-initialized with the project's ``init_weight`` helper.

    Args:
        num_classes: number of output classes for the 1x1 scoring conv.

    Returns:
        The modified SqueezeNet 1.1 model.
    """
    model = torchvision.models.squeezenet1_1(pretrained=True)
    head = nn.Sequential(
        nn.Dropout(p=0.5, inplace=False),
        nn.Conv2d(512, num_classes, kernel_size=(1, 1), stride=(1, 1)),
        nn.ReLU(inplace=True),
        nn.AdaptiveAvgPool2d(output_size=(1, 1)),
    )
    model.classifier = head
    init_weight(model.classifier)
    return model
예제 #5
0
 def __init__(self, num_classes):
     """Build a CNN: four BasicConv2d stages (3->16->32->64->128 channels)
     followed by a three-layer fully-connected head.

     Args:
         num_classes: size of the final output layer.
     """
     super(Net2, self).__init__()
     self.conv1_1 = BasicConv2d(3, 16)
     # self.conv1_2 = BasicConv2d(16, 16, pool=False)  # disabled extra stage
     self.conv2 = BasicConv2d(16, 32)
     self.conv3 = BasicConv2d(32, 64)
     self.conv4 = BasicConv2d(64, 128)
     # 128 * 14 * 14 assumes each BasicConv2d halves the spatial size,
     # starting from 224x224 input (224 / 2**4 = 14) -- TODO confirm.
     self.fc1 = nn.Linear(128 * 14 * 14, 120)
     self.fc2 = nn.Linear(120, 84)
     self.fc3 = nn.Linear(84, num_classes)
     # Project-wide weight-initialization helper applied to all submodules.
     init_weight(self)
예제 #6
0
 def __init__(self, num_classes):
     """Build a LeNet-style CNN: two conv/batch-norm stages with max pooling,
     followed by a three-layer fully-connected head.

     Args:
         num_classes: size of the final output layer.
     """
     super(Net, self).__init__()
     self.conv1 = nn.Conv2d(3, 6, 5)
     self.norm1 = nn.BatchNorm2d(6)
     # Single 2x2 pool module, presumably reused after each conv stage
     # in forward() -- TODO confirm against forward implementation.
     self.pool = nn.MaxPool2d(2, 2)
     self.conv2 = nn.Conv2d(6, 16, 5)
     self.norm2 = nn.BatchNorm2d(16)
     # 16 * 53 * 53 implies a fixed input resolution; consistent with
     # 224x224 input (224->220->110->106->53) -- TODO confirm.
     self.fc1 = nn.Linear(16 * 53 * 53, 120)
     self.fc2 = nn.Linear(120, 84)
     self.fc3 = nn.Linear(84, num_classes)
     # Project-wide weight-initialization helper applied to all submodules.
     init_weight(self)
예제 #7
0
 def __init__(self,
              in_channels,
              out_channels,
              kernel_size=3,
              padding=1,
              norm=True,
              activation='relu',
              pool=True):
     """Conv2d block with optional batch norm and optional 2x2 max pooling.

     Args:
         in_channels: input channel count of the convolution.
         out_channels: output channel count of the convolution.
         kernel_size: convolution kernel size (default 3).
         padding: convolution padding (default 1).
         norm: when True, attach a BatchNorm2d; otherwise ``self.norm`` is None.
         activation: activation name stored for use by forward().
         pool: when True, attach a 2x2 MaxPool2d; otherwise ``self.pool`` is None.
     """
     super(BasicConv2d, self).__init__()
     self.activation = activation
     self.conv = nn.Conv2d(in_channels, out_channels, kernel_size,
                           padding=padding)
     if norm:
         self.norm = nn.BatchNorm2d(out_channels)
     else:
         self.norm = None
     if pool:
         self.pool = nn.MaxPool2d(2)
     else:
         self.pool = None
     # Project-wide weight-initialization helper applied to all submodules.
     init_weight(self)
예제 #8
0
    def __init__(self, num_classes):
        """Build a five-stage conv/batch-norm CNN (3->16->32->64->128->256
        channels) with global average pooling and a three-layer FC head.

        Args:
            num_classes: size of the final output layer.
        """
        super(Net1, self).__init__()
        self.conv1 = nn.Conv2d(3, 16, 3)
        # Single 2x2 pool module, presumably reused between conv stages
        # in forward() -- TODO confirm against forward implementation.
        self.pool = nn.MaxPool2d(2, 2)
        self.norm1 = nn.BatchNorm2d(16)
        self.conv2 = nn.Conv2d(16, 32, 3)
        self.norm2 = nn.BatchNorm2d(32)
        self.conv3 = nn.Conv2d(32, 64, 3)
        self.norm3 = nn.BatchNorm2d(64)
        self.conv4 = nn.Conv2d(64, 128, 3)
        self.norm4 = nn.BatchNorm2d(128)
        self.conv5 = nn.Conv2d(128, 256, 3)
        self.norm5 = nn.BatchNorm2d(256)
        # Global average pooling makes the FC head independent of input size.
        self.global_pool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc1 = nn.Linear(256, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, num_classes)

        # Project-wide weight-initialization helper applied to all submodules.
        init_weight(self)