Example #1
 def __init__(self, in_channels=3):
     super(DenseNet, self).__init__()
     self.conv1 = Conv2dCOB(in_channels=in_channels,
                            out_channels=3,
                            kernel_size=3,
                            padding=1)
     self.conv2 = Conv2dCOB(in_channels=3,
                            out_channels=3,
                            kernel_size=3,
                            padding=1)
     self.conv3 = Conv2dCOB(in_channels=6,
                            out_channels=3,
                            kernel_size=3,
                            padding=1)
     self.conv4 = Conv2dCOB(in_channels=3,
                            out_channels=3,
                            kernel_size=3,
                            padding=1)
     self.relu1 = ReLUCOB()
     self.relu2 = ReLUCOB()
     self.relu3 = ReLUCOB()
     self.relu4 = ReLUCOB()
     self.concat1 = Concat()
     self.flatten = FlattenCOB()
     self.fc1 = LinearCOB(in_channels * 32 * 32, 10)
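The snippet shows only the constructor. A plausible forward pass, inferred from the channel counts (conv3 expects 6 = 3 + 3 input channels, so it presumably consumes the concatenation of the first two feature maps), might look like the sketch below; note that fc1's in_channels * 32 * 32 input size only matches conv4's 3-channel output when in_channels == 3.

 def forward(self, x):
     x1 = self.relu1(self.conv1(x))
     x2 = self.relu2(self.conv2(x1))
     # conv3 takes 6 input channels: the concatenation of x1 and x2
     x3 = self.relu3(self.conv3(self.concat1(x1, x2)))
     x4 = self.relu4(self.conv4(x3))
     # all convs are padded, so x4 is 3 x 32 x 32 for 32x32 inputs
     return self.fc1(self.flatten(x4))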
Example #2
 def __init__(self,
              num_input_features,
              growth_rate,
              bn_size,
              drop_rate,
              memory_efficient=False):
     super(_DenseLayerCOB, self).__init__()
     self.add_module('norm1', BatchNorm2dCOB(num_input_features))
     self.add_module('relu1', ReLUCOB(inplace=True))
     self.add_module(
         'conv1',
         Conv2dCOB(num_input_features,
                   bn_size * growth_rate,
                   kernel_size=1,
                   stride=1,
                   bias=False))
     self.add_module('norm2', BatchNorm2dCOB(bn_size * growth_rate))
     self.add_module('relu2', ReLUCOB(inplace=True))
     self.add_module(
         'conv2',
         Conv2dCOB(bn_size * growth_rate,
                   growth_rate,
                   kernel_size=3,
                   stride=1,
                   padding=1,
                   bias=False))
     self.drop_rate = float(drop_rate)
     self.memory_efficient = memory_efficient
     self.concat = Concat()
     self.dropout = DropoutCOB(self.drop_rate)
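As in torchvision's _DenseLayer, the forward pass presumably concatenates the feature maps of all preceding layers before the 1x1 bottleneck. A hedged sketch follows, assuming the Concat merge layer accepts a variable number of tensors; the real forward() is not part of this snippet.

 def forward(self, input):
     # accept either a single tensor or the list of all preceding feature maps
     prev_features = input if isinstance(input, (list, tuple)) else [input]
     concated = self.concat(*prev_features)
     bottleneck = self.conv1(self.relu1(self.norm1(concated)))
     new_features = self.conv2(self.relu2(self.norm2(bottleneck)))
     if self.drop_rate > 0:
         new_features = self.dropout(new_features)
     return new_features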
Example #3
    def __init__(self):
        super(ResidualNet5, self).__init__()
        nb_channels = 3
        self.conv1 = Conv2dCOB(in_channels=1,
                               out_channels=nb_channels,
                               kernel_size=3,
                               padding=1)
        self.conv2 = Conv2dCOB(in_channels=nb_channels,
                               out_channels=nb_channels,
                               kernel_size=3,
                               padding=1)
        self.conv3 = Conv2dCOB(in_channels=nb_channels,
                               out_channels=nb_channels,
                               kernel_size=3,
                               padding=1)
        self.conv4 = Conv2dCOB(in_channels=nb_channels,
                               out_channels=nb_channels,
                               kernel_size=3)
        self.conv11 = Conv2dCOB(in_channels=nb_channels,
                                out_channels=nb_channels,
                                kernel_size=3,
                                padding=1)
        self.relu1 = ReLUCOB()
        self.relu2 = ReLUCOB()
        self.relu3 = ReLUCOB()
        self.relu4 = ReLUCOB()
        self.add1 = Add()
        self.add2 = Add()

        self.relu5 = ReLUCOB()
Example #4
    def __init__(self):
        super(ResidualNet2, self).__init__()
        self.conv1 = Conv2dCOB(in_channels=1,
                               out_channels=3,
                               kernel_size=3,
                               padding=1)
        self.conv2 = Conv2dCOB(in_channels=3,
                               out_channels=3,
                               kernel_size=3,
                               padding=1)
        self.conv3 = Conv2dCOB(in_channels=3,
                               out_channels=3,
                               kernel_size=3,
                               padding=1)
        self.conv4 = Conv2dCOB(in_channels=3, out_channels=3, kernel_size=3)
        self.relu1 = ReLUCOB()
        self.relu2 = ReLUCOB()
        self.relu3 = ReLUCOB()
        self.relu4 = ReLUCOB()
        self.add1 = Add()
        self.add2 = Add()
        self.flatten = FlattenCOB()
        self.fc1 = LinearCOB(2028, 10)

        self.relu5 = ReLUCOB()
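The snippet stops at the constructor. One plausible wiring for ResidualNet2, constrained by the shapes (all convs are padded except conv4, and fc1's 2028 = 3 * 26 * 26 input features match a 28x28 input shrunk once by an unpadded 3x3 conv); the placement of relu5 in particular is a guess.

    def forward(self, x):
        x1 = self.relu1(self.conv1(x))
        x2 = self.relu2(self.conv2(x1))
        x2 = self.add1(x1, x2)               # first skip connection
        x3 = self.relu3(self.conv3(x2))
        x3 = self.add2(x2, x3)               # second skip connection
        x4 = self.relu4(self.conv4(x3))      # unpadded: 28x28 -> 26x26
        return self.relu5(self.fc1(self.flatten(x4)))  # relu5 placement is a guess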
Example #5
 def __init__(self,
              inplanes,
              planes,
              stride=1,
              downsample=None,
              groups=1,
              base_width=64,
              dilation=1,
              norm_layer=None):
     super(BasicBlockCOB, self).__init__()
     if norm_layer is None:
         norm_layer = BatchNorm2dCOB
     if groups != 1 or base_width != 64:
         raise ValueError(
             'BasicBlock only supports groups=1 and base_width=64')
     if dilation > 1:
         raise NotImplementedError(
             "Dilation > 1 not supported in BasicBlock")
     # Both self.conv1 and self.downsample layers downsample the input when stride != 1
     self.conv1 = conv3x3(inplanes, planes, stride)
     self.bn1 = norm_layer(planes)
     self.relu1 = ReLUCOB(inplace=True)
     self.conv2 = conv3x3(planes, planes)
     self.bn2 = norm_layer(planes)
     self.downsample = downsample
     self.add = Add()
     self.relu2 = ReLUCOB(inplace=True)
     self.stride = stride
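The module names map one-to-one onto torchvision's BasicBlock, so the forward pass presumably mirrors it, with the usual in-place `out += identity` replaced by the explicit Add module (a sketch; forward() is omitted from the snippet):

 def forward(self, x):
     identity = x
     out = self.relu1(self.bn1(self.conv1(x)))
     out = self.bn2(self.conv2(out))
     if self.downsample is not None:
         identity = self.downsample(x)
     # the explicit Add module lets the change of basis track the skip connection
     out = self.add(out, identity)
     return self.relu2(out)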
Example #6
 def __init__(self,
              inplanes,
              planes,
              stride=1,
              downsample=None,
              groups=1,
              base_width=64,
              dilation=1,
              norm_layer=None):
     super(BottleneckCOB, self).__init__()
     if norm_layer is None:
         norm_layer = BatchNorm2dCOB
     width = int(planes * (base_width / 64.)) * groups
     # Both self.conv2 and self.downsample layers downsample the input when stride != 1
     self.conv1 = conv1x1(inplanes, width)
     self.bn1 = norm_layer(width)
     self.conv2 = conv3x3(width, width, stride, groups, dilation)
     self.bn2 = norm_layer(width)
     self.conv3 = conv1x1(width, planes * self.expansion)
     self.bn3 = norm_layer(planes * self.expansion)
     self.relu1 = ReLUCOB(inplace=True)
     self.relu2 = ReLUCOB(inplace=True)
     self.relu3 = ReLUCOB(inplace=True)
     self.downsample = downsample
     self.stride = stride
     self.add = Add()
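Likewise for the bottleneck variant, presumably mirroring torchvision's Bottleneck.forward (a sketch):

 def forward(self, x):
     identity = x
     out = self.relu1(self.bn1(self.conv1(x)))
     out = self.relu2(self.bn2(self.conv2(out)))
     out = self.bn3(self.conv3(out))
     if self.downsample is not None:
         identity = self.downsample(x)
     return self.relu3(self.add(out, identity))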
Example #7
 def __init__(self):
     super(Net4, self).__init__()
     self.conv1 = Conv2dCOB(1, 6, 5)
     self.conv2 = Conv2dCOB(6, 3, 5)
     self.relu1 = ReLUCOB()
     self.relu2 = ReLUCOB()
     self.bn1 = BatchNorm2dCOB(6)
     self.bn2 = BatchNorm2dCOB(3)
Example #8
 def __init__(self, in_ch, out_ch):
     super().__init__()
     self.net = nn.Sequential(
         Conv2dCOB(in_ch, out_ch, kernel_size=3, padding=1),
         BatchNorm2dCOB(out_ch),
         ReLUCOB(inplace=False),
         Conv2dCOB(out_ch, out_ch, kernel_size=3, padding=1),
         BatchNorm2dCOB(out_ch),
         ReLUCOB(inplace=False),
     )
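This is the familiar double-conv block (as in U-Net); its forward pass is presumably just a call into the Sequential:

 def forward(self, x):
     return self.net(x)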
Example #9
 def __init__(self):
     super(Net, self).__init__()
     self.conv1 = Conv2dCOB(1, 6, 5)
     self.conv2 = Conv2dCOB(6, 16, 5)
     self.fc1 = LinearCOB(16 * 20 * 20, 120)
     self.fc2 = LinearCOB(120, 84)
     self.fc3 = LinearCOB(84, 10)
     self.flatten = FlattenCOB()
     self.relu1 = ReLUCOB()
     self.relu2 = ReLUCOB()
     self.tanhcob = TanhCOB()
     self.relu3 = ReLUCOB()
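fc1's 16 * 20 * 20 input size implies 28x28 inputs (two unpadded 5x5 convs: 28 -> 24 -> 20). A plausible forward pass follows; the placement of the Tanh relative to the ReLUs is a guess based on the declaration order.

 def forward(self, x):
     x = self.relu1(self.conv1(x))   # 1x28x28 -> 6x24x24
     x = self.relu2(self.conv2(x))   # 6x24x24 -> 16x20x20
     x = self.flatten(x)
     x = self.tanhcob(self.fc1(x))   # Tanh placement is a guess
     x = self.relu3(self.fc2(x))
     return self.fc3(x)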
Example #10
 def __init__(self):
     super(ResidualNet3, self).__init__()
     self.conv1 = Conv2dCOB(in_channels=1,
                            out_channels=3,
                            kernel_size=3,
                            padding=1)
     self.conv2 = Conv2dCOB(in_channels=3,
                            out_channels=3,
                            kernel_size=3,
                            padding=1)
     self.relu1 = ReLUCOB()
     self.relu2 = ReLUCOB()
     self.add1 = Add()
     self.relu3 = ReLUCOB()
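With both convs padded and 3 channels throughout, the single Add can only combine the two intermediate feature maps; a sketch (forward() is not shown in the snippet):

 def forward(self, x):
     x1 = self.relu1(self.conv1(x))
     x2 = self.relu2(self.conv2(x1))
     return self.relu3(self.add1(x1, x2))   # skip connection around conv2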
Example #11
 def __init__(self):
     super(DenseNet4, self).__init__()
     self.conv1 = Conv2dCOB(in_channels=1,
                            out_channels=3,
                            kernel_size=3,
                            padding=1)
     self.conv2 = Conv2dCOB(in_channels=3,
                            out_channels=3,
                            kernel_size=3,
                            padding=1)
     self.conv3 = Conv2dCOB(in_channels=6,
                            out_channels=3,
                            kernel_size=3,
                            padding=1)
     self.relu1 = ReLUCOB()
     self.relu2 = ReLUCOB()
     self.relu3 = ReLUCOB()
     self.concat1 = Concat()
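As in Example #1, conv3's 6 input channels pin down the concatenation; a hedged sketch of the forward pass:

 def forward(self, x):
     x1 = self.relu1(self.conv1(x))
     x2 = self.relu2(self.conv2(x1))
     # conv3 expects 6 = 3 + 3 channels: the concatenation of x1 and x2
     return self.relu3(self.conv3(self.concat1(x1, x2)))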
Example #12
 def __init__(self):
     super(Net2, self).__init__()
     self.conv1 = Conv2dCOB(1, 6, 5)
     self.pool1 = MaxPool2dCOB(kernel_size=2)
     self.pool2 = MaxPool2dCOB(kernel_size=2)
     self.conv2 = Conv2dCOB(6, 16, 5)
     self.flatten = FlattenCOB()
     self.sigmoid = SigmoidCOB()
     self.relu1 = ReLUCOB()
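One plausible ordering (the snippet omits forward(), and the position of the sigmoid is a guess):

 def forward(self, x):
     x = self.pool1(self.relu1(self.conv1(x)))
     x = self.pool2(self.sigmoid(self.conv2(x)))
     return self.flatten(x)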
Example #13
 def __init__(self, num_input_features, num_output_features):
     super(_TransitionCOB, self).__init__()
     self.add_module('norm', BatchNorm2dCOB(num_input_features))
     self.add_module('relu', ReLUCOB(inplace=True))
     self.add_module(
         'conv',
         Conv2dCOB(num_input_features,
                   num_output_features,
                   kernel_size=1,
                   stride=1,
                   bias=False))
     self.add_module('pool', AvgPool2dCOB(kernel_size=2, stride=2))
Example #14
 def __init__(self):
     super(SplitConcatModel, self).__init__()
     self.conv1 = Conv2dCOB(in_channels=1,
                            out_channels=3,
                            kernel_size=3,
                            padding=1)
     self.conv21 = Conv2dCOB(in_channels=3,
                             out_channels=3,
                             kernel_size=3,
                             padding=1)
     self.conv22 = Conv2dCOB(in_channels=3,
                             out_channels=3,
                             kernel_size=3,
                             padding=1)
     self.conv3 = Conv2dCOB(in_channels=6,
                            out_channels=3,
                            kernel_size=3,
                            padding=1)
     self.relu1 = ReLUCOB()
     self.relu21 = ReLUCOB()
     self.relu22 = ReLUCOB()
     self.concat1 = Concat()
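The naming (conv21/conv22) and conv3's 6 input channels suggest the path splits into two parallel branches whose outputs are concatenated; a sketch:

 def forward(self, x):
     x1 = self.relu1(self.conv1(x))
     x21 = self.relu21(self.conv21(x1))   # branch 1
     x22 = self.relu22(self.conv22(x1))   # branch 2
     return self.conv3(self.concat1(x21, x22))   # 3 + 3 = 6 channels in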
Example #15
    def __init__(self,
                 input_shape,
                 num_classes,
                 hidden_layers=(500, 500, 500, 500, 500),
                 activation="relu"):
        super().__init__()

        # Create the input and hidden layers
        layers_dim = (reduce(mul, input_shape),) + tuple(hidden_layers)
        layers = []
        for idx in range(len(hidden_layers)):
            layers.append(LinearCOB(layers_dim[idx], layers_dim[idx + 1]))

            if activation == 'elu':
                layers.append(ELUCOB())
            elif activation == 'leakyrelu':
                layers.append(LeakyReLUCOB())
            elif activation == 'tanh':
                layers.append(TanhCOB())
            else:
                layers.append(ReLUCOB())

        self.net = torch.nn.Sequential(
            FlattenCOB(), *layers, LinearCOB(hidden_layers[-1], num_classes))
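A hypothetical usage sketch; the class name is not visible in this snippet, so "MLP" below is a stand-in, and the constructor relies on functools.reduce and operator.mul being imported in its module:

import torch

# "MLP" is a placeholder for whatever this class is actually called
model = MLP(input_shape=(1, 28, 28), num_classes=10, activation="tanh")
logits = model(torch.rand(8, 1, 28, 28))   # -> shape (8, 10)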
Example #16
    def __init__(self,
                 growth_rate,
                 block_config,
                 num_init_features,
                 num_classes,
                 bn_size=4,
                 drop_rate=0,
                 input_channels=3,
                 memory_efficient=False):

        super(DenseNetCOB, self).__init__()
        # First convolution
        self.features = nn.Sequential(
            OrderedDict([
                ('conv0',
                 Conv2dCOB(input_channels,
                           num_init_features,
                           kernel_size=7,
                           stride=2,
                           padding=3,
                           bias=False)),
                ('norm0', BatchNorm2dCOB(num_init_features)),
                ('relu0', ReLUCOB(inplace=True)),
                ('pool0', MaxPool2dCOB(kernel_size=3, stride=2, padding=1)),
            ]))

        # Each denseblock
        num_features = num_init_features
        for i, num_layers in enumerate(block_config):
            block = _DenseBlockCOB(num_layers=num_layers,
                                   num_input_features=num_features,
                                   bn_size=bn_size,
                                   growth_rate=growth_rate,
                                   drop_rate=drop_rate,
                                   memory_efficient=memory_efficient)
            self.features.add_module('denseblock%d' % (i + 1), block)
            num_features = num_features + num_layers * growth_rate
            if i != len(block_config) - 1:
                trans = _TransitionCOB(num_input_features=num_features,
                                       num_output_features=num_features // 2)
                self.features.add_module('transition%d' % (i + 1), trans)
                num_features = num_features // 2

        # Final batch norm
        self.features.add_module('norm5', BatchNorm2dCOB(num_features))

        # Flatten layer
        self.relu = ReLUCOB(inplace=True)
        self.adaptive_avg_pool2d = AdaptiveAvgPool2dCOB((1, 1))
        self.flatten = FlattenCOB()

        # Linear layer
        self.classifier = LinearCOB(num_features, num_classes)

        # Official init from torch repo.
        for m in self.modules():
            if isinstance(m, Conv2dCOB):
                nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, BatchNorm2dCOB):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, LinearCOB):
                nn.init.constant_(m.bias, 0)
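The remaining modules map directly onto torchvision's DenseNet.forward, so the forward pass is presumably (a sketch):

    def forward(self, x):
        features = self.features(x)
        out = self.relu(features)
        out = self.adaptive_avg_pool2d(out)
        return self.classifier(self.flatten(out))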
Example #17
 def __init__(self):
     super().__init__()
     self.net = torch.nn.Sequential(FlattenCOB(), LinearCOB(784, 128),
                                    BatchNorm1dCOB(128), ReLUCOB(),
                                    LinearCOB(128, 10))
Example #18
 def __init__(self):
     super(ConvTransposeNet, self).__init__()
     self.conv1 = Conv2dCOB(1, 6, 5)
     self.conv2 = ConvTranspose2dCOB(6, 3 // 2, kernel_size=2, stride=2)
     self.relu1 = ReLUCOB(inplace=True)
     self.relu2 = ReLUCOB(inplace=True)
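Note that 3 // 2 == 1, so the transposed conv upsamples to a single output channel. A sketch of the presumable forward pass:

 def forward(self, x):
     x = self.relu1(self.conv1(x))    # 1 -> 6 channels, 5x5 kernel
     return self.relu2(self.conv2(x)) # 6 -> 1 channels, 2x spatial upsampling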
Example #19
    def __init__(self,
                 block,
                 layers,
                 num_classes,
                 zero_init_residual=False,
                 groups=1,
                 width_per_group=64,
                 replace_stride_with_dilation=None,
                 norm_layer=None,
                 input_channels=3,
                 for_dataset=None):
        super(ResNetCOB, self).__init__()
        if norm_layer is None:
            norm_layer = BatchNorm2dCOB
        self._norm_layer = norm_layer

        self.inplanes = 64
        self.dilation = 1
        if replace_stride_with_dilation is None:
            # each element in the tuple indicates if we should replace
            # the 2x2 stride with a dilated convolution instead
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError("replace_stride_with_dilation should be None "
                             "or a 3-element tuple, got {}".format(
                                 replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group

        self.conv1 = Conv2dCOB(input_channels,
                               self.inplanes,
                               kernel_size=7,
                               stride=2,
                               padding=3,
                               bias=False)
        if for_dataset == "cifar":
            # CIFAR10: kernel_size 7 -> 3, stride 2 -> 1, padding 3 -> 1
            self.conv1 = Conv2dCOB(input_channels,
                                   self.inplanes,
                                   kernel_size=3,
                                   stride=1,
                                   padding=1,
                                   bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = ReLUCOB(inplace=True)
        self.maxpool = MaxPool2dCOB(kernel_size=3, stride=2, padding=1)

        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block,
                                       128,
                                       layers[1],
                                       stride=2,
                                       dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block,
                                       256,
                                       layers[2],
                                       stride=2,
                                       dilate=replace_stride_with_dilation[1])
        self.layer4 = self._make_layer(block,
                                       512,
                                       layers[3],
                                       stride=2,
                                       dilate=replace_stride_with_dilation[2])
        self.avgpool = AdaptiveAvgPool2dCOB((1, 1))
        self.fc = LinearCOB(512 * block.expansion, num_classes)
        self.flatten = FlattenCOB()

        for m in self.modules():
            if isinstance(m, Conv2dCOB):
                nn.init.kaiming_normal_(m.weight,
                                        mode='fan_out',
                                        nonlinearity='relu')
            elif isinstance(m, (BatchNorm2dCOB, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, BottleneckCOB):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlockCOB):
                    nn.init.constant_(m.bn2.weight, 0)
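The declared modules follow torchvision's ResNet, so the forward pass presumably mirrors its _forward_impl, with the explicit FlattenCOB in place of torch.flatten (a sketch; _make_layer and forward() are not shown in the snippet):

    def forward(self, x):
        x = self.maxpool(self.relu(self.bn1(self.conv1(x))))
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.flatten(self.avgpool(x))
        return self.fc(x)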