Example 1
    def __init__(self, in_channels, out_channels, stride=1, base_width=64):
        super().__init__()
        width = int(out_channels * (base_width / 64.))
        self.residual_function = nn.Sequential(
            layers.Conv2d(in_channels, width, kernel_size=1, bias=False),
            layers.BatchNorm2d(width),
            nn.ReLU(inplace=True),
            layers.Conv2d(width,
                          width,
                          stride=stride,
                          kernel_size=3,
                          padding=1,
                          bias=False),
            layers.BatchNorm2d(width),
            nn.ReLU(inplace=True),
            layers.Conv2d(width,
                          out_channels * BottleNeck.expansion,
                          kernel_size=1,
                          bias=False),
            layers.BatchNorm2d(out_channels * BottleNeck.expansion),
        )

        self.shortcut = layers.Identity2d(in_channels)

        if stride != 1 or in_channels != out_channels * BottleNeck.expansion:
            self.shortcut = nn.Sequential(
                layers.Conv2d(in_channels,
                              out_channels * BottleNeck.expansion,
                              stride=stride,
                              kernel_size=1,
                              bias=False),
                layers.BatchNorm2d(out_channels * BottleNeck.expansion))
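
The example defines only the constructor. A minimal sketch of the matching forward pass (not shown in the original; it assumes the usual ResNet pattern of summing the two branches before a final ReLU):

    def forward(self, x):
        # Add the residual branch to the shortcut, then apply ReLU.
        return nn.functional.relu(self.residual_function(x) + self.shortcut(x))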
Example 2
    def __init__(self, f_in: int, f_out: int, downsample=False):
        super(Block, self).__init__()

        stride = 2 if downsample else 1
        self.conv1 = layers.Conv2d(f_in,
                                   f_out,
                                   kernel_size=3,
                                   stride=stride,
                                   padding=1,
                                   bias=False)
        self.bn1 = layers.BatchNorm2d(f_out)
        self.conv2 = layers.Conv2d(f_out,
                                   f_out,
                                   kernel_size=3,
                                   stride=1,
                                   padding=1,
                                   bias=False)
        self.bn2 = layers.BatchNorm2d(f_out)

        # Use a 1x1 projection (with BN) on the shortcut when the shape
        # changes; otherwise keep the identity shortcut.
        if downsample or f_in != f_out:
            self.shortcut = nn.Sequential(
                layers.Conv2d(f_in, f_out, kernel_size=1, stride=stride,
                              bias=False),
                layers.BatchNorm2d(f_out))
        else:
            self.shortcut = layers.Identity2d(f_in)
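
Again, only the constructor is shown. A plausible forward for this Block, assuming the standard two-convolution residual pattern implied by the layers above:

    def forward(self, x):
        # conv-bn-relu, conv-bn, add the shortcut, then a final relu.
        out = nn.functional.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += self.shortcut(x)
        return nn.functional.relu(out)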
Example 3
    def __init__(self, in_channels, out_channels, stride=1, base_width=64):
        super().__init__()

        # Residual function
        self.residual_function = nn.Sequential(
            layers.Conv2d(in_channels,
                          out_channels,
                          kernel_size=3,
                          stride=stride,
                          padding=1,
                          bias=False),
            layers.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            layers.Conv2d(out_channels,
                          out_channels * BasicBlock.expansion,
                          kernel_size=3,
                          padding=1,
                          bias=False),
            layers.BatchNorm2d(out_channels * BasicBlock.expansion))

        # Shortcut
        self.shortcut = layers.Identity2d(in_channels)

        # When the shortcut's output dimensions do not match the residual
        # function's output, use a 1x1 convolution to match them.
        if stride != 1 or in_channels != BasicBlock.expansion * out_channels:
            self.shortcut = nn.Sequential(
                layers.Conv2d(in_channels,
                              out_channels * BasicBlock.expansion,
                              kernel_size=1,
                              stride=stride,
                              bias=False),
                layers.BatchNorm2d(out_channels * BasicBlock.expansion))
Example 4
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride=1,
                 base_width=64,
                 batch_norm=True):
        super().__init__()

        self.batch_norm = batch_norm

        # residual function
        layer_list = [
            layers.Conv2d(
                in_channels,
                out_channels,
                kernel_size=3,
                stride=stride,
                padding=1,
                bias=False,
            ),
        ]
        if self.batch_norm:
            layer_list.append(layers.BatchNorm2d(out_channels))
        layer_list += [
            nn.ReLU(inplace=True),
            layers.Conv2d(
                out_channels,
                out_channels * BasicBlock.expansion,
                kernel_size=3,
                padding=1,
                bias=False,
            ),
        ]
        if self.batch_norm:
            layer_list.append(
                layers.BatchNorm2d(out_channels * BasicBlock.expansion))
        self.residual_function = nn.Sequential(*layer_list)

        # shortcut
        self.shortcut = layers.Identity2d(in_channels)

        # When the shortcut's output dimensions do not match the residual
        # function's output, use a 1x1 convolution to match them.
        if stride != 1 or in_channels != BasicBlock.expansion * out_channels:
            layer_list = [
                layers.Conv2d(
                    in_channels,
                    out_channels * BasicBlock.expansion,
                    kernel_size=1,
                    stride=stride,
                    bias=False,
                )
            ]
            if self.batch_norm:
                layer_list.append(
                    layers.BatchNorm2d(out_channels * BasicBlock.expansion))
            self.shortcut = nn.Sequential(*layer_list)
Example 5
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride=1,
                 base_width=64,
                 batch_norm=True):
        super().__init__()

        self.batch_norm = batch_norm

        width = int(out_channels * (base_width / 64.0))
        layer_list = [
            layers.Conv2d(in_channels, width, kernel_size=1, bias=False),
        ]
        if self.batch_norm:
            layer_list.append(layers.BatchNorm2d(width))
        layer_list += [
            nn.ReLU(inplace=True),
            layers.Conv2d(width,
                          width,
                          stride=stride,
                          kernel_size=3,
                          padding=1,
                          bias=False),
        ]
        if self.batch_norm:
            layer_list.append(layers.BatchNorm2d(width))
        layer_list += [
            nn.ReLU(inplace=True),
            layers.Conv2d(width,
                          out_channels * BottleNeck.expansion,
                          kernel_size=1,
                          bias=False),
        ]
        if self.batch_norm:
            layer_list.append(
                layers.BatchNorm2d(out_channels * BottleNeck.expansion))
        self.residual_function = nn.Sequential(*layer_list)

        self.shortcut = layers.Identity2d(in_channels)

        if stride != 1 or in_channels != out_channels * BottleNeck.expansion:
            layer_list = [
                layers.Conv2d(
                    in_channels,
                    out_channels * BottleNeck.expansion,
                    stride=stride,
                    kernel_size=1,
                    bias=False,
                ),
            ]
            if self.batch_norm:
                layer_list.append(
                    layers.BatchNorm2d(out_channels * BottleNeck.expansion))
            self.shortcut = nn.Sequential(*layer_list)
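
A hypothetical instantiation illustrating the batch_norm flag (the flag simply omits every BatchNorm2d from both the residual and shortcut branches; BottleNeck and layers are the names used in the example):

    block_bn = BottleNeck(64, 64, stride=2, batch_norm=True)
    block_plain = BottleNeck(64, 64, stride=2, batch_norm=False)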
Example 6
    def __init__(self, plan, num_classes, dense_classifier):
        super(ResNet, self).__init__()

        # Initial convolution.
        current_filters = plan[0][0]
        self.conv = layers.Conv2d(
            3, current_filters, kernel_size=3, stride=1, padding=1, bias=False
        )
        self.bn = layers.BatchNorm2d(current_filters)

        # The subsequent blocks of the ResNet.
        blocks = []
        for segment_index, (filters, num_blocks) in enumerate(plan):
            for block_index in range(num_blocks):
                downsample = segment_index > 0 and block_index == 0
                blocks.append(Block(current_filters, filters, downsample))
                current_filters = filters

        self.blocks = nn.Sequential(*blocks)

        self.fc = layers.Linear(plan[-1][0], num_classes)
        if dense_classifier:
            self.fc = nn.Linear(plan[-1][0], num_classes)

        self._initialize_weights()
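
Here plan is a list of (filters, num_blocks) pairs, one per segment. A hypothetical plan for a CIFAR-style ResNet-20 (three segments of three two-conv blocks, plus the initial conv and the classifier):

    plan = [(16, 3), (32, 3), (64, 3)]  # 3 segments x 3 blocks x 2 convs + 2 = 20 layers
    model = ResNet(plan, num_classes=10, dense_classifier=False)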
Example 7
    def __init__(self, in_filters, out_filters):
        super(ConvBNModule, self).__init__()
        self.conv = layers.Conv2d(in_filters,
                                  out_filters,
                                  kernel_size=3,
                                  padding=1)
        self.bn = layers.BatchNorm2d(out_filters)
Example 8
    def __init__(self,
                 block,
                 num_block,
                 base_width,
                 num_classes=200,
                 dense_classifier=False):
        super().__init__()

        self.in_channels = 64

        self.conv1 = nn.Sequential(
            layers.Conv2d(3, 64, kernel_size=3, padding=1, bias=False),
            layers.BatchNorm2d(64), nn.ReLU(inplace=True))
        # We use a different input size than the original paper,
        # so conv2_x's stride is 1.
        self.conv2_x = self._make_layer(block, 64, num_block[0], 1, base_width)
        self.conv3_x = self._make_layer(block, 128, num_block[1], 2,
                                        base_width)
        self.conv4_x = self._make_layer(block, 256, num_block[2], 2,
                                        base_width)
        self.conv5_x = self._make_layer(block, 512, num_block[3], 2,
                                        base_width)
        self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = layers.Linear(512 * block.expansion, num_classes)
        if dense_classifier:
            self.fc = nn.Linear(512 * block.expansion, num_classes)
        self._initialize_weights()
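
The _make_layer helper is referenced but not shown. A sketch consistent with how it is called above, assuming the common pattern where only the first block of a stage uses the given stride and self.in_channels tracks the running channel count:

    def _make_layer(self, block, out_channels, num_blocks, stride, base_width):
        # Only the first block of the stage may downsample.
        strides = [stride] + [1] * (num_blocks - 1)
        layer_list = []
        for s in strides:
            layer_list.append(
                block(self.in_channels, out_channels, s, base_width))
            self.in_channels = out_channels * block.expansion
        return nn.Sequential(*layer_list)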
Example 9
def make_layers(cfg, batch_norm=False):
    layer_list = []
    in_channels = 3
    for v in cfg:
        if v == 'M':
            layer_list += [nn.MaxPool2d(kernel_size=2, stride=2)]
        else:
            conv2d = layers.Conv2d(in_channels, v, kernel_size=3, padding=1)
            if batch_norm:
                layer_list += [
                    conv2d,
                    layers.BatchNorm2d(v),
                    nn.ReLU(inplace=True)
                ]
            else:
                layer_list += [conv2d, nn.ReLU(inplace=True)]
            in_channels = v
    return nn.Sequential(*layer_list)
Example 10
def make_layers(cfg, batch_norm=False):
    layer_list = []

    input_channel = 3
    for v in cfg:
        if v == 'M':
            layer_list += [nn.MaxPool2d(kernel_size=2, stride=2)]
            continue

        layer_list += [
            layers.Conv2d(input_channel, v, kernel_size=3, padding=1)
        ]

        if batch_norm:
            layer_list += [layers.BatchNorm2d(v)]

        layer_list += [nn.ReLU(inplace=True)]
        input_channel = v

    return nn.Sequential(*layer_list)
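
Both variants consume a VGG-style configuration list: integers are output widths of 3x3 convolutions and 'M' marks a 2x2 max-pool. For instance, with the standard VGG-11 configuration:

    cfg_vgg11 = [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M']
    features = make_layers(cfg_vgg11, batch_norm=True)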