Example #1
 def __init__(self):
     super(GoNet, self).__init__()
     self.layer1 = BasicBlock(1, 64)
     self.layer2 = BasicBlock(64, 128)
     self.layer3 = BasicBlock(128, 256)
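     # 361 = 19 * 19, presumably one output channel per point of a Go board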
     self.layer4 = BasicBlock(256, 361)
     self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
Example #2
    def __init__(self, in_channels, out_channels, bilinear=False, residual=False, expansion=1):
        super(UpSamp, self).__init__()
        if bilinear:
            self.up = nn.Sequential(
                nn.Conv2d(in_channels, in_channels // 2, kernel_size=1, padding=0, stride=1),
                nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
                )
        else:
            self.up = nn.ConvTranspose2d(in_channels, in_channels // 2,
                                         kernel_size=4, stride=2, padding=1)
        
        if residual:
            layers = []
            dimension_map = None
            if in_channels != out_channels:
                dimension_map = nn.Sequential(
                    nn.Conv2d(in_channels, out_channels, kernel_size=1),
                    nn.BatchNorm2d(out_channels)
                )
            layers.append(BasicBlock(in_channels, out_channels, downsample=dimension_map))

            for _ in range(1, expansion):
                layers.append(BasicBlock(out_channels, out_channels, downsample=None))
            
            self.conv = nn.Sequential(*layers)
        else:
            self.conv = _DoubleConv(in_channels, out_channels)
Example #3
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.block1 = BasicBlock(3,
                                 64,
                                 2,
                                 downsample=self._downsample(3, 64 * 1,
                                                             2))  # 64
        self.block2 = BasicBlock(64,
                                 128,
                                 2,
                                 downsample=self._downsample(64, 128, 2))  # 32
        self.block3 = BasicBlock(128,
                                 256,
                                 2,
                                 downsample=self._downsample(128, 256,
                                                             2))  # 16
        self.block4 = BasicBlock(256,
                                 16,
                                 2,
                                 downsample=self._downsample(256, 16, 2))  # 8

        self.fct_decode = nn.Sequential(
            self._upblock(16, 64, 3),
            self._upblock(64, 128, 3),
            self._upblock(128, 128, 3),
            self._upblock(128, 16, 3),
        )
Example #4
    def __init__(self, f_channels=64, hr_res=(1024, 1024), multiplier=2):
        # self.hr_res = hr_res
        # self.lr_res = tuple(dim / multiplier for dim in hr_res)

        super(MDSR, self).__init__()

        # 3x3 convolution at beginning
        self.prior_conv_0 = nn.Conv2d(3, f_channels, 3, padding=1)

        # 2 5x5 convolutions specific to the multiplier
        self.prior_conv_1 = NoNormRes5(f_channels, f_channels)
        self.prior_conv_2 = NoNormRes5(f_channels, f_channels)

        # 16 or 80 3x3 resnet-relu blocks (80 here); an nn.ModuleList
        # registers them as submodules, so moving the model with .to(device)
        # moves them too and the per-block .cuda() workaround is unnecessary
        self.shared_blocks = nn.ModuleList([
            BasicBlock(f_channels, f_channels, norm_layer=nn.Identity)
            for _ in range(80)
        ])

        # sub-pixel convolution for upscaling
        self.upscale_pre = nn.Conv2d(f_channels,
                                     3 * multiplier * multiplier,
                                     3,
                                     padding=1)
        self.upscale = nn.PixelShuffle(multiplier)

        # 2 3x3 resnet-relu blocks for result
        self.out_pre_0 = BasicBlock(3, 3, norm_layer=nn.Identity, dilation=1)
        self.out_pre_1 = BasicBlock(3, 3, norm_layer=nn.Identity, dilation=1)
Example #5
 def __init__(self):
     nn.Module.__init__(self)
     self.f0 = nn.Conv2d(in_channels=1,
                         out_channels=12,
                         kernel_size=(12, 12),
                         stride=(1, 1),
                         padding=(0, 0),
                         dilation=(1, 1),
                         groups=1,
                         bias=True,
                         padding_mode='zeros')
     self.f1 = BasicBlock(inplanes=12, planes=12)
     self.f2 = BasicBlock(inplanes=12, planes=12)
     self.f3 = nn.Conv2d(in_channels=12,
                         out_channels=24,
                         kernel_size=(10, 10),
                         stride=(1, 1),
                         padding=(0, 0),
                         dilation=(1, 1),
                         groups=1,
                         bias=False,
                         padding_mode='zeros')
     self.f4 = nn.ReLU(inplace=False)
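     # with 1x28x28 inputs (e.g. MNIST) the conv stack produces
     # 24 x 8 x 8 = 1536 features, matching f5's in_features; forward
     # presumably flattens before applying f5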
     self.f5 = nn.Linear(in_features=1536, out_features=10, bias=False)
     self.f6 = nn.LogSoftmax(dim=1)
Example #6
 def __init__(self):
     nn.Module.__init__(self)
     self.f0 = nn.Conv2d(in_channels=1,
                         out_channels=15,
                         kernel_size=(6, 6),
                         stride=(1, 1),
                         padding=(0, 0),
                         dilation=(1, 1),
                         groups=1,
                         bias=True,
                         padding_mode='zeros')
     self.f1 = BasicBlock(inplanes=15, planes=15)
     self.f2 = BasicBlock(inplanes=15, planes=15)
     self.f3 = nn.Conv2d(in_channels=15,
                         out_channels=43,
                         kernel_size=(2, 2),
                         stride=(1, 1),
                         padding=(0, 0),
                         dilation=(1, 1),
                         groups=1,
                         bias=True,
                         padding_mode='zeros')
     self.f4 = nn.Linear(in_features=20812, out_features=37, bias=False)
     self.f5 = nn.Linear(in_features=37, out_features=10, bias=True)
     self.f6 = nn.LogSoftmax(dim=1)
Example #7
 def __init__(self, nspeakers):
     super(SpeakerEmbedding2d, self).__init__()
     self.relu = nn.ReLU()
     self.net = nn.Sequential(
         nn.Conv2d(1, 4, kernel_size=3, stride=(2, 2), bias=False),
         nn.BatchNorm2d(4), nn.ELU(),
         nn.Conv2d(4,
                   16,
                   kernel_size=3,
                   stride=(2, 1),
                   padding=(0, 1),
                   bias=False), nn.BatchNorm2d(16), nn.ELU(),
         BasicBlock(16, 16),
         nn.Conv2d(16, 64, kernel_size=3, stride=(2, 2), bias=False),
         nn.BatchNorm2d(64), nn.ELU(), BasicBlock(64, 64),
         nn.Conv2d(64,
                   256,
                   kernel_size=3,
                   stride=(2, 1),
                   padding=(0, 1),
                   bias=False), nn.BatchNorm2d(256), nn.ELU(),
         nn.Conv2d(256, 128, kernel_size=3, stride=(2, 2), bias=True),
         AvgPool(2), Flatten())
     self.embedding_size = 512
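     # 896 is the flattened feature size the conv stack produces for the
     # expected input shape; it must be recomputed if that shape changes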
     self.embedding = nn.Linear(896, 512)
     self.ln = nn.LayerNorm(512)
     self.classification = nn.Linear(512, nspeakers)
Example #8
 def __init__(self):
     nn.Module.__init__(self)
     self.f0 = nn.Conv2d(in_channels=1,
                         out_channels=20,
                         kernel_size=(19, 19),
                         stride=(1, 1),
                         padding=(0, 0),
                         dilation=(1, 1),
                         groups=1,
                         bias=True,
                         padding_mode='zeros')
     self.f1 = BasicBlock(inplanes=20, planes=20)
     self.f2 = BasicBlock(inplanes=20, planes=20)
     self.f3 = BasicBlock(inplanes=20, planes=20)
     self.f4 = nn.Conv2d(in_channels=20,
                         out_channels=30,
                         kernel_size=(8, 8),
                         stride=(1, 1),
                         padding=(0, 0),
                         dilation=(1, 1),
                         groups=1,
                         bias=False,
                         padding_mode='zeros')
     self.f5 = nn.Linear(in_features=270, out_features=10, bias=False)
     self.f6 = nn.LogSoftmax(dim=1)
Example #9
    def __init__(self, n, block, num_classes=10, lwf=False, num_source_cls=200, growing=False):
        self.inplanes = 16
        self.growing = growing
        super(CResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 16, 3, padding=1)
        self.bn1 = nn.BatchNorm2d(16)
        self.relu = nn.ReLU(inplace=True)
        self.block1 = BasicBlock(16, 16)
        self.blocks2 = []
        for i in range(n):
            self.blocks2.append(block(16))
        self.blocks2 = nn.Sequential(*self.blocks2)
        downsample = nn.Sequential(nn.Conv2d(16, 32, kernel_size=1, stride=2, bias=False),
                                   nn.BatchNorm2d(32))
        self.block3 = BasicBlock(16, 32, 2, downsample)
        self.blocks4 = []
        for i in range(n):
            self.blocks4.append(block(32))
        self.blocks4 = nn.Sequential(*self.blocks4)
        downsample = nn.Sequential(nn.Conv2d(32, 64, kernel_size=1, stride=2, bias=False),
                                   nn.BatchNorm2d(64))
        self.block5 = BasicBlock(32, 64, 2, downsample)
        self.blocks6 = []
        for i in range(n):
            self.blocks6.append(block(64))
        self.blocks6 = nn.Sequential(*self.blocks6)
        if self.growing:
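            # note: this reuses the same downsample instance as block5, so
            # those 1x1-conv/BN weights are shared between the two branches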
            self.block5_add = BasicBlock(32, 64, 2, downsample)
            self.blocks6_add = []
            for i in range(n):
                self.blocks6_add.append(block(64))
            self.blocks6_add = nn.Sequential(*self.blocks6_add)
            self.gamma = nn.Parameter(torch.zeros(1).fill_(10))
        self.avgpool = nn.AvgPool2d(8)
        self.view = View(-1)

        self.fc = nn.Linear(64, num_classes)

        self.lwf = lwf
        if self.lwf:
            self.lwf_lyr = nn.Linear(64, num_source_cls)

        self.alphas = nn.ParameterList([
            nn.Parameter(torch.rand(3, 1, 1, 1) * 0.1),
            nn.Parameter(torch.rand(3, 1, 1, 1) * 0.1),
            nn.Parameter(torch.rand(3, 1, 1, 1) * 0.1)])

        if self.growing:
            self.fc = nn.Linear(64*2, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
Example #10
 def __init__(self, in_features, num_classes, pretrained=False):
     super(RoIFeatureExtractor_new, self).__init__()
     self.fc_head = TwoMLPHead(in_channels=1280 * 7 * 7,
                               representation_size=in_features)
     layers = [
         BasicBlock(256 * 5, 1024 * 5),
         Bottleneck(1024 * 5, 1024 * 5),
         BasicBlock(256 * 5, 1024 * 5),
         Bottleneck(1024 * 5, 1024 * 5),
         BasicBlock(256 * 5, 1024 * 5),
         Bottleneck(1024 * 5, 1024 * 5)
     ]
     self.conv_head = nn.Sequential(*layers)
Example #11
def test_cuda_amp(tmp_path, inv_enabled, amp_enabled, use_checkpointing):
    if not torch.cuda.is_available() and amp_enabled:
        pytest.skip("This test requires a GPU to be available")
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    model = resnet18(num_classes=10)
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
    trainset = torchvision.datasets.CIFAR10(root=tmp_path,
                                            train=True,
                                            download=True,
                                            transform=transform)
    trainloader = torch.utils.data.DataLoader(trainset,
                                              batch_size=4,
                                              shuffle=True,
                                              num_workers=2)

    # Replace layer1
    if not use_checkpointing:
        model.layer1 = nn.Sequential(
            InvertibleBlock(BasicBlock(32, 32),
                            keep_input=False,
                            enabled=inv_enabled),
            InvertibleBlock(BasicBlock(32, 32),
                            keep_input=False,
                            enabled=inv_enabled),
        )
    else:
        model.layer1 = nn.Sequential(CheckPointBlock(BasicBlock(32, 32)),
                                     CheckPointBlock(BasicBlock(32, 32)))

    model.to(device)

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
    scaler = GradScaler(enabled=amp_enabled)

    for i, data in enumerate(trainloader):
        inputs, labels = data
        inputs, labels = inputs.to(device), labels.to(device)

        optimizer.zero_grad()
        with autocast(enabled=amp_enabled):
            outputs = model(inputs)
            loss = criterion(outputs, labels)
        # run backward outside the autocast region, as the torch.cuda.amp
        # recipe recommends; GradScaler still applies the loss scaling
        scaler.scale(loss).backward()
        scaler.step(optimizer)
        scaler.update()
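        # a single batch is enough for this smoke test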
        break
Example #12
 def __init__(self):
     nn.Module.__init__(self)
     self.f0 = nn.Conv2d(in_channels=1, out_channels=43, kernel_size=(13, 13), stride=(1, 1), padding=(0, 0), dilation=(1, 1), groups=1, bias=True, padding_mode='zeros')
     self.f1 = BasicBlock(inplanes=43, planes=43)
     self.f2 = nn.Conv2d(in_channels=43, out_channels=45, kernel_size=(9, 9), stride=(1, 1), padding=(0, 0), dilation=(1, 1), groups=1, bias=True, padding_mode='zeros')
     self.f3 = nn.Linear(in_features=2880, out_features=10, bias=False)
     self.f4 = nn.LogSoftmax(dim=1)
Example #13
 def __init__(self):
     super().__init__()
     self.res2_conv = nn.Conv2d(512, 1, 1)
     self.res3_conv = nn.Conv2d(1024, 1, 1)
     self.res4_conv = nn.Conv2d(2048, 1, 1)
     self.res1 = BasicBlock(64, 64, 1)
     self.res2 = BasicBlock(32, 32, 1)
     self.res3 = BasicBlock(16, 16, 1)
     self.res1_pre = nn.Conv2d(64, 32, 1)
     self.res2_pre = nn.Conv2d(32, 16, 1)
     self.res3_pre = nn.Conv2d(16, 8, 1)
     self.gate1 = GatedConv(32, 32)
     self.gate2 = GatedConv(16, 16)
     self.gate3 = GatedConv(8, 8)
     self.gate = nn.Conv2d(8, 1, 1, bias=False)
     self.fuse = nn.Conv2d(2, 1, 1, bias=False)
Example #14
 def __init__(self):
     nn.Module.__init__(self)
     self.f0 = nn.Conv2d(in_channels=1,
                         out_channels=53,
                         kernel_size=(3, 3),
                         stride=(1, 1),
                         padding=(0, 0),
                         dilation=(1, 1),
                         groups=1,
                         bias=True,
                         padding_mode='zeros')
     self.f1 = BasicBlock(inplanes=53, planes=53)
     self.f2 = nn.Conv2d(in_channels=53,
                         out_channels=59,
                         kernel_size=(1, 1),
                         stride=(1, 1),
                         padding=(0, 0),
                         dilation=(1, 1),
                         groups=1,
                         bias=True,
                         padding_mode='zeros')
     self.f3 = nn.ReLU(inplace=False)
     self.f4 = nn.Linear(in_features=39884, out_features=117, bias=False)
     self.f5 = nn.ReLU(inplace=False)
     self.f6 = nn.Linear(in_features=117, out_features=10, bias=False)
     self.f7 = nn.LogSoftmax(dim=1)
Example #15
 def __init__(self):
     nn.Module.__init__(self)
     self.f0 = nn.Conv2d(in_channels=1, out_channels=54, kernel_size=(20, 20), stride=(1, 1), padding=(0, 0), dilation=(1, 1), groups=1, bias=True, padding_mode='zeros')
     self.f1 = BasicBlock(inplanes=54, planes=54)
     self.f2 = nn.Conv2d(in_channels=54, out_channels=14, kernel_size=(7, 7), stride=(1, 1), padding=(0, 0), dilation=(1, 1), groups=1, bias=False, padding_mode='zeros')
     self.f3 = nn.Linear(in_features=126, out_features=10, bias=False)
     self.f4 = nn.LogSoftmax(dim=1)
Example #16
 def __init__(self):
     nn.Module.__init__(self)
     self.f0 = nn.Conv2d(in_channels=1, out_channels=56, kernel_size=(2, 2), stride=(1, 1), padding=(0, 0), dilation=(1, 1), groups=1, bias=True, padding_mode='zeros')
     self.f1 = BasicBlock(inplanes=56, planes=56)
     self.f2 = nn.Conv2d(in_channels=56, out_channels=27, kernel_size=(3, 3), stride=(1, 1), padding=(0, 0), dilation=(1, 1), groups=1, bias=True, padding_mode='zeros')
     self.f3 = nn.Linear(in_features=16875, out_features=10, bias=True)
     self.f4 = nn.LogSoftmax(dim=1)
Example #17
def _make_block_group(in_planes, out_planes, n_blocks, stride=1):
    downsample = None
    if stride != 1 or in_planes != out_planes:
        downsample = nn.Sequential(
            nn.Conv2d(in_planes,
                      out_planes,
                      kernel_size=1,
                      stride=stride,
                      bias=False),
            nn.BatchNorm2d(out_planes),
        )

    layers = [BasicBlock(in_planes, out_planes, stride, downsample)]
    layers += [BasicBlock(out_planes, out_planes) for _ in range(n_blocks)]
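    # note: the group holds n_blocks + 1 blocks in total (one strided block
    # plus n_blocks stride-1 blocks)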

    return nn.Sequential(*layers)
Example #18
def residual_basic_block(feature_dim=256,
                         num_blocks=1,
                         l2norm=True,
                         final_conv=False,
                         norm_scale=1.0,
                         out_dim=None,
                         interp_cat=False):
    """Construct a network block based on the BasicBlock used in ResNet 18 and 34."""
    if out_dim is None:
        out_dim = feature_dim
    feat_layers = []
    if interp_cat:
        feat_layers.append(InterpCat())
    for i in range(num_blocks):
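        # every block except the last keeps feature_dim channels; the last
        # maps to out_dim unless final_conv is set, in which case the conv
        # appended below performs that mapping instead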
        odim = feature_dim if i < num_blocks - 1 + int(final_conv) else out_dim
        feat_layers.append(BasicBlock(feature_dim, odim))
    if final_conv:
        feat_layers.append(
            nn.Conv2d(feature_dim,
                      out_dim,
                      kernel_size=3,
                      padding=1,
                      bias=False))
    if l2norm:
        feat_layers.append(InstanceL2Norm(scale=norm_scale))
    return nn.Sequential(*feat_layers)
Example #19
    def __init__(self,
                 image_size: int,
                 latent_size: int,
                 num_input_channels: int,
                 size_channel_map: dict,
                 *,
                 target_size: int = 4,
                 stylegan_variant: int = 2):
        super().__init__()

        self.image_size = image_size
        self.latent_size = latent_size
        self.stylegan_variant = stylegan_variant
        self.size_channel_map = size_channel_map

        self.log_input_size = int(math.log(image_size, 2))
        self.log_target_size = int(math.log(target_size, 2))
        assert image_size > target_size, "Input size must be larger than target size"
        assert 2**self.log_input_size == image_size, "Input size must be a power of 2"
        assert 2**self.log_target_size == target_size, "Target size must be a power of 2"

        self.start_block = BasicBlock(
            num_input_channels,
            size_channel_map[image_size],
            downsample=nn.Sequential(
                nn.Conv2d(num_input_channels,
                          size_channel_map[image_size],
                          kernel_size=1,
                          stride=1),
                nn.BatchNorm2d(size_channel_map[image_size])))
        self.intermediate_block = BasicBlock(
            size_channel_map[image_size],
            size_channel_map[image_size],
        )

        # nn.ModuleList (rather than a plain Python list) so the blocks are
        # registered as submodules and picked up by .parameters() and .to()
        self.resnet_blocks = nn.ModuleList([
            BasicBlock(in_planes := size_channel_map[2**current_size],
                       out_planes := size_channel_map[2**(current_size - 1)],
                       stride=2,
                       downsample=nn.Sequential(
                           nn.Conv2d(in_planes,
                                     out_planes,
                                     kernel_size=1,
                                     stride=2), nn.BatchNorm2d(out_planes)))
            for current_size in range(self.log_input_size,
                                      self.log_target_size, -1)
        ])
Example #20
    def __init__(self, resnet, layers, dropout=0):
        super(UResNet, self).__init__()

        self.resnet = resnet

        self.de_1 = UResNetLayer(512, 256, layers[3])
        self.de_2 = UResNetLayer(256, 128, layers[2])
        self.de_3 = UResNetLayer(128, 64, layers[1])
        self.de_4 = UResNetLayer(64, 64, layers[0])

        self.dropout = nn.Dropout2d(dropout)

        self.classifier = nn.Sequential(*[
            BasicBlock(64, 64),
            BasicBlock(64, 64),
            nn.Conv2d(64, 1, kernel_size=1, bias=True),
        ])
Example #21
 def __init__(self, in_channels, out_channels, residual=False, expansion=1):
     super(InConv, self).__init__()
     if residual:
         layers = []
         dimension_map = None
         if in_channels != out_channels:
             dimension_map = nn.Sequential(
                 nn.Conv2d(in_channels, out_channels, kernel_size=1),
                 nn.BatchNorm2d(out_channels)
             )
         layers.append(BasicBlock(in_channels, out_channels, downsample=dimension_map))
         
         for _ in range(1, expansion):
             layers.append(BasicBlock(out_channels, out_channels, downsample=None))
         
         self.conv = nn.Sequential(*layers)
     else:
         self.conv = _DoubleConv(in_channels, out_channels)
Example #22
    def __init__(self, in_channels=5, norm_layer=nn.BatchNorm2d):
        super(Resnet18Features, self).__init__()

        self.in_channels = in_channels
        self.norm_layer = norm_layer

        self.conv1 = nn.Conv2d(self.in_channels,
                               64,
                               kernel_size=7,
                               stride=2,
                               padding=3,
                               bias=False)
        self.bn1 = self.norm_layer(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        self.layer1 = BasicBlock(64, 64, norm_layer=self.norm_layer)
        self.layer2 = BasicBlock(64,
                                 128,
                                 stride=2,
                                 norm_layer=self.norm_layer,
                                 downsample=nn.Sequential(
                                     conv1x1(64, 128, stride=2),
                                     self.norm_layer(128)))
        self.layer3 = BasicBlock(128,
                                 256,
                                 stride=2,
                                 norm_layer=self.norm_layer,
                                 downsample=nn.Sequential(
                                     conv1x1(128, 256, stride=2),
                                     self.norm_layer(256)))
        self.layer4 = BasicBlock(256,
                                 512,
                                 stride=2,
                                 norm_layer=self.norm_layer,
                                 downsample=nn.Sequential(
                                     conv1x1(256, 512, stride=2),
                                     self.norm_layer(512)))

        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))

        self.init_weights()
Example #23
def get_network(nf, nf2, n_classes):
    """
    """
    ds = nn.Sequential(
        conv1x1(nf, nf2),
        nn.AvgPool2d(2),
    )
    resblock1 = BasicBlock(inplanes=nf, planes=nf2, stride=2, downsample=ds)
    resblock2 = BasicBlock(inplanes=nf2,
                           planes=nf2,
                           stride=2,
                           downsample=nn.AvgPool2d(2))
    fn = nn.Sequential(
        resblock1,
        resblock2,
        nn.AdaptiveAvgPool2d(1),
        Flatten(),
        nn.Linear(nf2, n_classes),
    )
    return fn
Example #24
 def __init__(self, inchannel=3, outchannel=3, ngf=32, reslayer=6):
     super(G_net, self).__init__()
     self.inconv = InConv(inchannel, ngf)
     self.down1 = DownSample(ngf, ngf * 2)
     self.down2 = DownSample(ngf * 2, ngf * 4)
     self.resnet = nn.Sequential()
     for i in range(reslayer):
         self.resnet.add_module('res{0}'.format(i),
                                BasicBlock(ngf * 4, ngf * 4))
     self.up1 = UpSample(ngf * 4, ngf * 2)
     self.up2 = UpSample(ngf * 2, ngf)
     self.outconv = OutConv(ngf, outchannel)
Example #25
 def __init__(self, num_classes=10):
     super().__init__()
     # BasicBlock.expansion=2
     self.features = nn.Sequential(
         nn.Conv2d(in_channels=3, out_channels=64, kernel_size=3,
                   padding=1),
         nn.BatchNorm2d(64),
         BasicBlock(64, 64),
         nn.Conv2d(in_channels=64, out_channels=64, kernel_size=1,
                   stride=2),  # 16 x 16
         nn.BatchNorm2d(64),
         BasicBlock(64, 64),
         nn.Conv2d(in_channels=64, out_channels=64, kernel_size=1,
                   stride=2),  # 8 x 8
         nn.BatchNorm2d(64),
         nn.ReLU(inplace=True))
     self.classifier = nn.Sequential(nn.Dropout(p=0.5),
                                     nn.Linear(8 * 8 * 64, 4096),
                                     nn.Dropout(p=0.5),
                                     nn.ReLU(inplace=True),
                                     nn.Linear(4096, num_classes))
Example #26
def resnet_stage(in_channels: int, out_channels: int, num_blocks: int) -> List[nn.Module]:
    _layers: List[nn.Module] = []

    in_chan = in_channels
    for _ in range(num_blocks):
        downsample = None
        if in_chan != out_channels:
            downsample = nn.Sequential(*conv_sequence_pt(in_chan, out_channels, False, True, kernel_size=1))

        _layers.append(BasicBlock(in_chan, out_channels, downsample=downsample))
        in_chan = out_channels

    return _layers
Example #27
    def __init__(self, in_dim, dim, out_dim, nblocks=8):
        """
		Initializes a residual module.
		Args:
			To be added.
		"""
        super(Residual_Module, self).__init__()
        self.nblocks = nblocks
        assert self.nblocks > 0
        self.in_block = nn.utils.weight_norm(
            nn.Conv2d(in_dim, dim, (3, 3), stride=1, padding=1, bias=True))
        self.core_blocks = nn.ModuleList(
            [BasicBlock(dim, dim) for _ in range(nblocks)])
        self.out_block = nn.utils.weight_norm(
            nn.Conv2d(dim, out_dim, (1, 1), stride=1, padding=0, bias=True))
Example #28
 def __init__(self):
     nn.Module.__init__(self)
     self.f0 = nn.Conv2d(in_channels=1,
                         out_channels=47,
                         kernel_size=(18, 18),
                         stride=(1, 1),
                         padding=(0, 0),
                         dilation=(1, 1),
                         groups=1,
                         bias=True,
                         padding_mode='zeros')
     self.f1 = BasicBlock(inplanes=47, planes=47)
     self.f2 = BasicBlock(inplanes=47, planes=47)
     self.f3 = nn.Conv2d(in_channels=47,
                         out_channels=38,
                         kernel_size=(5, 5),
                         stride=(1, 1),
                         padding=(0, 0),
                         dilation=(1, 1),
                         groups=1,
                         bias=True,
                         padding_mode='zeros')
     self.f4 = nn.Linear(in_features=1862, out_features=10, bias=False)
     self.f5 = nn.LogSoftmax(dim=1)
Example #29
 def __init__(self, **kwargs):
     super(MonteCarloResnet, self).__init__()
     self.p = kwargs.get('p', 0.2)
     self.num_outputs = kwargs.get('num_outputs', 2)
     self.num_samples = kwargs.get('num_samples', 10)
     self.model = nn.Sequential(
         nn.Conv2d(in_channels=3,
                   out_channels=32,
                   kernel_size=5,
                   stride=2,
                   padding=(2, 2)), nn.BatchNorm2d(32), nn.ReLU(),
         nn.MaxPool2d(3), nn.Dropout2d(p=self.p), BasicBlock(32, 32),
         # after Flatten the activations are 2-D, so element-wise nn.Dropout
         # is the right op here (nn.Dropout2d expects channel feature maps)
         nn.Flatten(), nn.Linear(4160, 64), nn.Dropout(p=self.p),
         nn.Linear(64, 32), nn.Dropout(p=self.p),
         nn.Linear(32, self.num_outputs))
Example #30
    def __init__(self, chan_in, time_step, zero_init_residual=False):
        super(CNN, self).__init__()

        self.chan_in = chan_in
        if chan_in == 3:
            self.conv1 = nn.Conv2d(chan_in,
                                   64,
                                   kernel_size=7,
                                   stride=2,
                                   padding=2,
                                   bias=False)
        else:
            self.chan1_conv = nn.Conv2d(chan_in,
                                        64,
                                        kernel_size=7,
                                        stride=2,
                                        padding=2,
                                        bias=False)
        self.relu = nn.ReLU(inplace=True)
        self.bn1 = nn.BatchNorm2d(64)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = nn.Sequential(*[BasicBlock(64, 64) for i in range(0, 3)])
        self.layer2 = nn.Sequential(*[
            BasicBlock(64, 128, stride=2, downsample=downsample(64, 128, 2))
            if i == 0 else BasicBlock(128, 128) for i in range(4)
        ])
        self.layer3 = nn.Sequential(*[
            BasicBlock(128, 256, stride=(1, 2),
                       downsample=downsample(128, 256, (1, 2)))
            if i == 0 else BasicBlock(256, 256) for i in range(6)
        ])
        self.layer4 = nn.Sequential(*[
            BasicBlock(256, 512, stride=(1, 2),
                       downsample=downsample(256, 512, (1, 2)))
            if i == 0 else BasicBlock(512, 512) for i in range(3)
        ])
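        # adaptive pooling keeps time_step rows and collapses width to 1,
        # yielding (batch, 512, time_step, 1) features, presumably for a
        # downstream sequence model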
        self.avgpool = nn.AdaptiveAvgPool2d(output_size=(time_step, 1))

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight,
                                        mode='fan_out',
                                        nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)