Example #1
    def __init__(self, in_size, out_size, inner_nc, dropout=0.0, innermost=False, outermost=False, submodule=None):
        super(UnetBlock, self).__init__()
        self.outermost = outermost

        downconv = nn.Conv(in_size, inner_nc, 4, stride=2, padding=1, bias=False)
        downnorm = nn.BatchNorm2d(inner_nc)
        downrelu = nn.LeakyReLU(0.2)
        upnorm = nn.BatchNorm2d(out_size)
        uprelu = nn.ReLU()

        if outermost:
            upconv = nn.ConvTranspose(2*inner_nc, out_size, 4, stride=2, padding=1)
            down = [downconv]
            up = [uprelu, upconv, nn.Tanh()]
            model = down + [submodule] + up
        elif innermost:
            upconv = nn.ConvTranspose(inner_nc, out_size, 4, stride=2, padding=1, bias=False)
            down = [downrelu, downconv]
            up = [uprelu, upconv, upnorm]
            model = down + up
        else:
            upconv = nn.ConvTranspose(2*inner_nc, out_size, 4, stride=2, padding=1, bias=False)
            down = [downrelu, downconv, downnorm]
            up = [uprelu, upconv, upnorm]
            if dropout:
                model = down + [submodule] + up + [nn.Dropout(dropout)]
            else:
                model = down + [submodule] + up
        
        self.model = nn.Sequential(*model)

        for m in self.modules():
            weights_init_normal(m)
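
These blocks nest recursively, innermost first, in the pix2pix U-Net style. The skip-connection forward pass is not shown in this excerpt, so the channel arithmetic below assumes the usual convention that each non-outermost block returns its input concatenated with its output. A minimal construction sketch, assuming the Jittor framework (jittor.nn) that the nn.Conv / nn.ConvTranspose / LeakyReLU(scale=...) calls in these examples point to, with illustrative channel sizes:

# Hypothetical 3-level generator built from the UnetBlock above.
block = UnetBlock(512, 512, inner_nc=512, innermost=True)                # bottleneck
block = UnetBlock(256, 256, inner_nc=512, submodule=block, dropout=0.5)  # middle level
unet = UnetBlock(3, 3, inner_nc=256, submodule=block, outermost=True)    # full generator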
Example #2
    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 downsample=None,
                 baseWidth=26,
                 scale=4,
                 stype='normal'):
        """ Constructor
        Args:
            inplanes: input channel dimensionality
            planes: output channel dimensionality
            stride: conv stride. Replaces pooling layer.
            downsample: None when stride = 1
            baseWidth: basic width of conv3x3
            scale: number of scale.
            type: 'normal': normal set. 'stage': first block of a new stage.
        """
        super(Bottle2neck, self).__init__()

        width = int(math.floor(planes * (baseWidth / 64.0)))
        self.conv1 = nn.Conv2d(inplanes,
                               width * scale,
                               kernel_size=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(width * scale)

        if scale == 1:
            self.nums = 1
        else:
            self.nums = scale - 1
        if stype == 'stage':
            self.pool = nn.AvgPool2d(kernel_size=3, stride=stride, padding=1)
        convs = []
        bns = []
        for i in range(self.nums):
            convs.append(
                nn.Conv2d(width,
                          width,
                          kernel_size=3,
                          stride=stride,
                          padding=1,
                          bias=False))
            bns.append(nn.BatchNorm2d(width))
        self.convs = nn.ModuleList(convs)
        self.bns = nn.ModuleList(bns)

        self.conv3 = nn.Conv2d(width * scale,
                               planes * self.expansion,
                               kernel_size=1,
                               bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)

        self.relu = nn.ReLU()
        self.downsample = downsample
        self.stype = stype
        self.scale = scale
        self.width = width
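
self.expansion is a class attribute of Bottle2neck that this excerpt does not show; in the reference Res2Net implementation it is 4. A minimal instantiation sketch under that assumption:

# One Res2Net bottleneck: 1x1 reduce, hierarchical 3x3 group, 1x1 expand.
# With expansion == 4, the block maps 256 input channels to 64 * 4 = 256.
block = Bottle2neck(inplanes=256, planes=64, baseWidth=26, scale=4)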
Example #3
 def __init__(self, input_dim, dim, numAngle, numRho):
     super(DHT_Layer, self).__init__()
     self.first_conv = nn.Sequential(nn.Conv2d(input_dim, dim, 1),
                                     nn.BatchNorm2d(dim), nn.ReLU())
     self.dht = DHT(numAngle=numAngle, numRho=numRho)
     self.convs = nn.Sequential(nn.Conv2d(dim, dim, 3, 1, 1),
                                nn.BatchNorm2d(dim), nn.ReLU(),
                                nn.Conv2d(dim, dim, 3, 1, 1),
                                nn.BatchNorm2d(dim), nn.ReLU())
Example #4
    def __init__(self, part_num):
        super(DGCNN_partseg, self).__init__()
        self.seg_num_all = part_num
        self.k = 40
        self.knn = KNN(self.k)
        self.bn1 = nn.BatchNorm2d(64)
        self.bn2 = nn.BatchNorm2d(64)
        self.bn3 = nn.BatchNorm2d(64)
        self.bn4 = nn.BatchNorm2d(64)
        self.bn5 = nn.BatchNorm2d(64)
        self.bn6 = nn.BatchNorm1d(1024)
        self.bn7 = nn.BatchNorm1d(64)
        self.bn8 = nn.BatchNorm1d(256)
        self.bn9 = nn.BatchNorm1d(256)
        self.bn10 = nn.BatchNorm1d(128)

        self.conv1 = nn.Sequential(nn.Conv2d(6, 64, kernel_size=1, bias=False),
                                   self.bn1,
                                   nn.LeakyReLU(scale=0.2))
        self.conv2 = nn.Sequential(nn.Conv2d(64, 64, kernel_size=1, bias=False),
                                   self.bn2,
                                   nn.LeakyReLU(scale=0.2))
        self.conv3 = nn.Sequential(nn.Conv2d(64*2, 64, kernel_size=1, bias=False),
                                   self.bn3,
                                   nn.LeakyReLU(scale=0.2))
        self.conv4 = nn.Sequential(nn.Conv2d(64, 64, kernel_size=1, bias=False),
                                   self.bn4,
                                   nn.LeakyReLU(scale=0.2))
        self.conv5 = nn.Sequential(nn.Conv2d(64*2, 64, kernel_size=1, bias=False),
                                   self.bn5,
                                   nn.LeakyReLU(scale=0.2))
        self.conv6 = nn.Sequential(nn.Conv1d(192, 1024, kernel_size=1, bias=False),
                                   self.bn6,
                                   nn.LeakyReLU(scale=0.2))
        self.conv7 = nn.Sequential(nn.Conv1d(16, 64, kernel_size=1, bias=False),
                                   self.bn7,
                                   nn.LeakyReLU(scale=0.2))
        self.conv8 = nn.Sequential(nn.Conv1d(1280, 256, kernel_size=1, bias=False),
                                   self.bn8,
                                   nn.LeakyReLU(scale=0.2))
        self.dp1 = nn.Dropout(p=0.5)
        self.conv9 = nn.Sequential(nn.Conv1d(256, 256, kernel_size=1, bias=False),
                                   self.bn9,
                                   nn.LeakyReLU(scale=0.2))
        self.dp2 = nn.Dropout(p=0.5)
        self.conv10 = nn.Sequential(nn.Conv1d(256, 128, kernel_size=1, bias=False),
                                   self.bn10,
                                   nn.LeakyReLU(scale=0.2))
        self.conv11 = nn.Conv1d(128, self.seg_num_all, kernel_size=1, bias=False)
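
The 16 input channels of conv7 presumably carry a one-hot object-category vector: ShapeNet part segmentation has 16 object categories and 50 part labels, which also matches the usual choice of part_num. A sketch under that assumption:

# ShapeNet part segmentation: 50 part labels across 16 object categories.
model = DGCNN_partseg(part_num=50)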
Example #5
    def _make_layer(self, block, planes, blocks, stride=1):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes,
                          planes * block.expansion,
                          kernel_size=1,
                          stride=stride,
                          bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )

        layers = []
        layers.append(
            block(self.inplanes,
                  planes,
                  stride,
                  downsample=downsample,
                  stype='stage',
                  baseWidth=self.baseWidth,
                  scale=self.scale))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(
                block(self.inplanes,
                      planes,
                      baseWidth=self.baseWidth,
                      scale=self.scale))

        return nn.Sequential(*layers)
Example #6
 def discriminator_block(in_filters, out_filters, stride=2, normalization=True):
     'Returns downsampling layers of each discriminator block'
     layers = [nn.Conv(in_filters, out_filters, 4, stride=stride, padding=1)]
     if normalization:
         layers.append(nn.BatchNorm2d(out_filters))
     layers.append(nn.LeakyReLU(scale=0.2))
     return layers
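
A PatchGAN-style discriminator can be assembled from this helper; the sketch below uses an illustrative channel progression, not one taken from the original project:

D = nn.Sequential(
    *discriminator_block(3, 64, normalization=False),  # no BatchNorm on the first block
    *discriminator_block(64, 128),
    *discriminator_block(128, 256),
    nn.Conv(256, 1, 4, padding=1),                     # 1-channel patch map
)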
Example #7
    def __init__(self, block, layers, baseWidth=26, scale=4, num_classes=1000):
        self.inplanes = 64
        super(Res2Net, self).__init__()
        self.baseWidth = baseWidth
        self.scale = scale
        self.conv1 = nn.Conv2d(3,
                               64,
                               kernel_size=7,
                               stride=2,
                               padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU()
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Linear(512 * block.expansion, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight,
                                        mode='fan_out',
                                        nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
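
With the Bottle2neck block from Example #2, the reference res2net50_26w_4s configuration would be built as follows (a sketch; [3, 4, 6, 3] block counts, baseWidth=26 and scale=4 are the standard settings):

model = Res2Net(Bottle2neck, [3, 4, 6, 3], baseWidth=26, scale=4, num_classes=1000)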
Example #8
 def __init__(self, in_features, dropout=0.0):
     super(ResidualBlock, self).__init__()
     model = [
         nn.ReflectionPad2d(1),
         nn.Conv(in_features, in_features, 3),
         nn.BatchNorm2d(in_features),
         nn.ReLU()
     ]
     if dropout:
         model += [nn.Dropout(dropout)]
     model += [
         nn.ReflectionPad2d(1),
         nn.Conv(in_features, in_features, 3),
         nn.BatchNorm2d(in_features)
     ]
     self.conv_block = nn.Sequential(*model)
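
The residual sum itself lives in the execute/forward method, which this excerpt omits. A minimal sketch of the usual convention (execute being Jittor's counterpart of PyTorch's forward):

class ResidualBlockFull(ResidualBlock):
    def execute(self, x):
        # identity shortcut around the two-conv block defined above
        return x + self.conv_block(x)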
Example #9
    def __init__(self, C, num_classes, layers, genotype):
        super(NetworkCIFAR, self).__init__()
        self._layers = layers

        stem_multiplier = 3
        C_curr = stem_multiplier * C
        self.stem = nn.Sequential(
            nn.Conv2d(3, C_curr, 3, padding=1, bias=False),
            nn.BatchNorm2d(C_curr)
        )

        C_prev_prev, C_prev, C_curr = C_curr, C_curr, C
        self.cells = nn.ModuleList()
        reduction_prev = False
        for i in range(layers):
            if i in [layers // 3, 2 * layers // 3]:
                C_curr *= 2
                reduction = True
            else:
                reduction = False
            cell = Cell(genotype, C_prev_prev, C_prev, C_curr, reduction, reduction_prev)
            reduction_prev = reduction
            self.cells.append(cell)
            C_prev_prev, C_prev = C_prev, cell.multiplier * C_curr

        self.global_pooling = nn.AdaptiveAvgPool2d(1)
        self.classifier = nn.Linear(C_prev, num_classes)
Example #10
 def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True):
     super(ReLUConvBN, self).__init__()
     self.op = nn.Sequential(
         nn.ReLU(),
         nn.Conv2d(C_in,
                   C_out,
                   kernel_size,
                   stride=stride,
                   padding=padding,
                   bias=False),
         nn.BatchNorm2d(C_out, affine=affine))
Example #11
 def __init__(self, in_size, out_size, normalize=True, dropout=0.0):
     super(UNetDown, self).__init__()
     layers = [
         nn.Conv(in_size, out_size, 4, stride=2, padding=1, bias=False)
     ]
     if normalize:
         layers.append(nn.BatchNorm2d(out_size))
     layers.append(nn.LeakyReLU(scale=0.2))
     if dropout:
         layers.append(nn.Dropout(dropout))
     self.model = nn.Sequential(*layers)
Example #12
def make_layers_from_size(sizes, isFinal=False):
    layers = []
    for size in sizes:
        layers += [
            nn.Conv2d(size[0], size[1], kernel_size=3, padding=1),
            nn.BatchNorm2d(size[1], momentum=0.1),
            nn.ReLU()
        ]
    if isFinal:
        layers.pop()
        layers.pop()
    return nn.Sequential(*layers)
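
Each size entry is an (in_channels, out_channels) pair, and isFinal=True pops the trailing ReLU and BatchNorm2d so the block ends in a bare convolution. A usage sketch with illustrative channel counts:

# Two 3x3 conv stages; the final stage is a raw conv producing 21 logits.
head = make_layers_from_size([[64, 64], [64, 21]], isFinal=True)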
Example #13
    def __init__(self, input_nc, output_nc, h=96, w=96):
        super(AutoEncoderWithFC, self).__init__()
        
        out_features = 64
        model = [nn.Conv(input_nc, 64, kernel_size=4, stride=2, padding=1, bias=False)]
        in_features = out_features
        for _ in range(3):
            out_features *= 2
            model += [nn.LeakyReLU(0.2),
                      nn.Conv(in_features, out_features, 4,
                              stride=2, padding=1, bias=False),
                      nn.BatchNorm2d(out_features)]
            in_features = out_features
        self.encoder = nn.Sequential(*model)

        self.rh = int(h/16)
        self.rw = int(w/16)
        self.feat_dim = 512 * self.rh * self.rw

        self.fc1 = nn.Linear(self.feat_dim, 1024)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(1024, self.feat_dim)
        
        model2 = []
        for _ in range(3):
            out_features //= 2
            model2 += [nn.ReLU(),
                       nn.ConvTranspose(in_features, out_features, 4,
                                        stride=2, padding=1, bias=False),
                       nn.BatchNorm2d(out_features)]
            in_features = out_features
        model2 += [nn.ReLU(),
                   nn.ConvTranspose(out_features, output_nc, 4, stride=2, padding=1, bias=False),
                   nn.Tanh()]
        self.decoder = nn.Sequential(*model2)

        for m in self.modules():
            weights_init_normal(m)
Example #14
    def __init__(self,
                 in_channel,
                 dim=256,
                 n_knn=16,
                 pos_hidden_dim=64,
                 attn_hidden_multiplier=4):
        super(Transformer, self).__init__()
        self.n_knn = n_knn
        self.conv_key = nn.Conv1d(dim, dim, 1)
        self.conv_query = nn.Conv1d(dim, dim, 1)
        self.conv_value = nn.Conv1d(dim, dim, 1)

        self.pos_mlp = nn.Sequential(nn.Conv2d(3, pos_hidden_dim, 1),
                                     nn.BatchNorm2d(pos_hidden_dim), nn.ReLU(),
                                     nn.Conv2d(pos_hidden_dim, dim, 1))

        self.attn_mlp = nn.Sequential(
            nn.Conv2d(dim, dim * attn_hidden_multiplier, 1),
            nn.BatchNorm2d(dim * attn_hidden_multiplier), nn.ReLU(),
            nn.Conv2d(dim * attn_hidden_multiplier, dim, 1))

        self.linear_start = nn.Conv1d(in_channel, dim, 1)
        self.linear_end = nn.Conv1d(dim, in_channel, 1)
Example #15
 def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True):
     super(SepConv, self).__init__()
     self.op = nn.Sequential(
         nn.ReLU(),
         nn.Conv2d(C_in,
                   C_in,
                   kernel_size=kernel_size,
                   stride=stride,
                   padding=padding,
                   groups=C_in,
                   bias=False),
         nn.Conv2d(C_in, C_in, kernel_size=1, padding=0, bias=False),
         nn.BatchNorm2d(C_in, affine=affine),
         nn.ReLU(),
         nn.Conv2d(C_in,
                   C_in,
                   kernel_size=kernel_size,
                   stride=1,
                   padding=padding,
                   groups=C_in,
                   bias=False),
         nn.Conv2d(C_in, C_out, kernel_size=1, padding=0, bias=False),
         nn.BatchNorm2d(C_out, affine=affine),
     )
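
This is the DARTS-style separable convolution: two depthwise (groups=C_in) plus pointwise stages, with only the first stage carrying the stride. For instance (illustrative sizes):

# Stride-2 3x3 separable conv, halving spatial resolution while keeping 36 channels.
op = SepConv(C_in=36, C_out=36, kernel_size=3, stride=2, padding=1)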
Example #16
 def __init__(self, in_size, out_size, dropout=0.0):
     super(UNetUp, self).__init__()
     layers = [
         nn.ConvTranspose(in_size,
                          out_size,
                          4,
                          stride=2,
                          padding=1,
                          bias=False),
         nn.BatchNorm2d(out_size),
         nn.ReLU()
     ]
     if dropout:
         layers.append(nn.Dropout(dropout))
     self.model = nn.Sequential(*layers)
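
UNetUp pairs with UNetDown from Example #11; the skip concatenation happens in the generator's execute method, which is not shown here. A sketch of one down/up pair, where in_size=128 assumes a concatenation of two 64-channel feature maps upstream:

down1 = UNetDown(3, 64, normalize=False)  # first encoder stage, no BatchNorm
up1 = UNetUp(128, 64, dropout=0.5)        # decoder stage fed by a 64+64 skip concat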
Example #17
    def __init__(self, input_nc, classes, ngf=64, num_downs=3, h=96, w=96):
        super(Classifier, self).__init__()
        
        model = [nn.Conv(input_nc, ngf, 4, stride=2, padding=1, bias=False)]
        multiple = 2
        for i in range(num_downs):
            mult = multiple**i
            model += [nn.LeakyReLU(0.2),
                      nn.Conv(int(ngf * mult), int(ngf * mult * multiple), 4,
                              stride=2, padding=1, bias=False),
                      nn.BatchNorm2d(int(ngf * mult * multiple))]
        self.encoder = nn.Sequential(*model)
        strides = 2**(num_downs+1)
        self.fc1 = nn.Linear(int(ngf*h*w/(strides*2)), classes)

        for m in self.modules():
            weights_init_normal(m)
Example #18
    def build_mlps(self,
                   mlp_spec: List[int],
                   use_xyz: bool = True,
                   bn: bool = True) -> nn.Sequential:
        layers = []

        if use_xyz:
            mlp_spec[0] += 3

        for i in range(1, len(mlp_spec)):
            layers.append(
                nn.Conv2d(mlp_spec[i - 1], mlp_spec[i], kernel_size=1))
            if bn:
                layers.append(nn.BatchNorm2d(mlp_spec[i]))
            layers.append(nn.ReLU())

        return nn.Sequential(*layers)
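
Note that use_xyz=True mutates mlp_spec in place (mlp_spec[0] += 3), so a fresh list should be passed on every call. A usage sketch, where `module` stands for an instance of the owning class, which this excerpt does not name:

# The first Conv2d then sees 16 + 3 = 19 input channels (features + xyz).
shared_mlp = module.build_mlps([16, 64, 128], use_xyz=True)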
Example #19
 def __init__(self, C_in, C_out, affine=True):
     super(FactorizedReduce, self).__init__()
     assert C_out % 2 == 0
     self.relu = nn.ReLU()
     self.conv_1 = nn.Conv2d(C_in,
                             C_out // 2,
                             1,
                             stride=2,
                             padding=0,
                             bias=False)
     self.conv_2 = nn.Conv2d(C_in,
                             C_out // 2,
                             1,
                             stride=2,
                             padding=0,
                             bias=False)
     self.bn = nn.BatchNorm2d(C_out, affine=affine)
Example #20
    def __init__(self, in_channels=3, out_channels=1):
        super(Combiner, self).__init__()

        model = [nn.ReflectionPad2d(3),
                 nn.Conv(in_channels, 64, 7, padding=0, bias=False),
                 nn.BatchNorm2d(64),
                 nn.ReLU()]

        for i in range(2):
            model += [ResidualBlock(64, dropout=0.5)]

        model += [nn.ReflectionPad2d(3),
                  nn.Conv(64, out_channels, kernel_size=7, padding=0),
                  nn.Tanh()]

        self.model = nn.Sequential(*model)

        for m in self.modules():
            weights_init_normal(m)
Example #21
    def __init__(self, nsample, in_channel, mlp, bandwidth):
        super(PointConvDensitySetInterpolation, self).__init__()
        self.bandwidth = bandwidth
        self.nsample = nsample
        self.in_channel = in_channel
        self.mlp_convs = nn.ModuleList()
        self.mlp_bns = nn.ModuleList()
        self.relu = nn.ReLU()
        last_channel = in_channel
        self.weightnet = WeightNet(3, 16)
        self.densitynet = DensityNet()

        for out_channel in mlp:
            self.mlp_convs.append(nn.Conv2d(last_channel, out_channel, 1))
            self.mlp_bns.append(nn.BatchNorm2d(out_channel))
            last_channel = out_channel

        self.linear = nn.Linear(16 * mlp[-1], mlp[-1])
        self.bn_linear = nn.BatchNorm1d(mlp[-1])
Example #22
    def __init__(self, in_channels=3, out_channels=1, num_res_blocks=9, extra_channel=3):
        super(GeneratorResStyle2Net, self).__init__()
        out_features = 64
        model0 = [nn.ReflectionPad2d(3),
                  nn.Conv(in_channels, out_features, 7, bias=False),
                  nn.BatchNorm2d(out_features),
                  nn.ReLU()]
        in_features = out_features
        for _ in range(2):
            out_features *= 2
            model0 += [nn.Conv(in_features, out_features, 3, stride=2, padding=1, bias=False),
                       nn.BatchNorm2d(out_features),
                       nn.ReLU()]
            in_features = out_features
        model = [nn.Conv2d(out_features + extra_channel, out_features, 3,
                           stride=1, padding=1, bias=False),
                 nn.BatchNorm2d(out_features),
                 nn.ReLU()]
        for _ in range(num_res_blocks):
            model += [ResidualBlock(out_features)]
        for _ in range(2):
            out_features //= 2
            model += [nn.ConvTranspose(in_features, out_features, 3, stride=2,
                                       padding=1, output_padding=1, bias=False),
                      nn.BatchNorm2d(out_features),
                      nn.ReLU()]
            in_features = out_features
        model += [nn.ReflectionPad2d(3),
                  nn.Conv(out_features, out_channels, 7),
                  nn.Tanh()]
        self.model0 = nn.Sequential(*model0)
        self.model = nn.Sequential(*model)

        for m in self.modules():
            weights_init_normal(m)