    def __init__(self, in_channels, channels, params, stride=1):
        super(BasicBlockWOutput, self).__init__()
        add_output = params[0]
        num_classes = params[1]
        input_size = params[2]
        self.output_id = params[3]

        self.depth = 2

        layers = nn.ModuleList()

        conv_layer = []
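        # Main branch: two 3x3 convolutions with batch norm, the usual ResNet
        # BasicBlock body; the nn.ReLU() appended to `layers` further below is
        # presumably applied after the residual sum in the forward pass.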
        conv_layer.append(
            nn.Conv2d(in_channels,
                      channels,
                      kernel_size=3,
                      stride=stride,
                      padding=1,
                      bias=False))
        conv_layer.append(nn.BatchNorm2d(channels))
        conv_layer.append(nn.ReLU())
        conv_layer.append(
            nn.Conv2d(channels,
                      channels,
                      kernel_size=3,
                      stride=1,
                      padding=1,
                      bias=False))
        conv_layer.append(nn.BatchNorm2d(channels))

        layers.append(nn.Sequential(*conv_layer))

        shortcut = nn.Sequential()

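        # self.expansion is assumed to be a class-level attribute (typically 1
        # for a basic block, as in the standard ResNet definition); it is not
        # set in this __init__.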
        if stride != 1 or in_channels != self.expansion * channels:
            shortcut = nn.Sequential(
                nn.Conv2d(in_channels,
                          self.expansion * channels,
                          kernel_size=1,
                          stride=stride,
                          bias=False),
                nn.BatchNorm2d(self.expansion * channels))

        layers.append(shortcut)
        layers.append(nn.ReLU())

        self.layers = layers

        if add_output:
            self.output = af.InternalClassifier(input_size,
                                                self.expansion * channels,
                                                num_classes)
            self.no_output = False

        else:
            self.output = None
            self.forward = self.only_forward
            self.no_output = True
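
    # The forward pass is not part of this listing. A minimal sketch of what it
    # plausibly looks like, given that __init__ stores [conv body, shortcut,
    # ReLU] in self.layers and rebinds self.forward to only_forward when no
    # internal classifier is attached; the (features, flag, prediction) return
    # convention below is an assumption, not taken from the original code.
    def forward(self, x):
        fwd = self.layers[0](x)          # main convolutional branch
        fwd = fwd + self.layers[1](x)    # residual shortcut
        fwd = self.layers[2](fwd)        # final ReLU
        return fwd, 1, self.output(fwd)  # features, has-output flag, internal prediction

    def only_forward(self, x):
        fwd = self.layers[0](x)
        fwd = fwd + self.layers[1](x)
        return self.layers[2](fwd), 0, None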
    def __init__(self, in_channels, out_channels, add_ic=False, num_classes=10, input_size=32):
        super(ConvBNRLUnit, self).__init__()
        # Conv -> BatchNorm -> ReLU. padding=1 is assumed here so the 3x3
        # convolution preserves the spatial size, consistent with the other
        # 3x3 convolutions in this listing.
        self.layers = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU()
        )

        if add_ic:
            self.ic = af.InternalClassifier(input_size, out_channels, num_classes)
        else:
            self.ic = None
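
# Minimal usage sketch for ConvBNRLUnit (assumes the class subclasses nn.Module,
# as the other blocks in this listing do; the class statement itself is not
# shown above, and add_ic=False avoids the af.InternalClassifier dependency):
import torch

unit = ConvBNRLUnit(3, 64, add_ic=False)
feats = unit.layers(torch.randn(1, 3, 32, 32))  # -> (1, 64, 32, 32) with padding=1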
Example #3
    def __init__(self, in_channels, channels, dropout_rate, params, stride=1):
        super(wide_basic, self).__init__()

        add_output = params[0]
        num_classes = params[1]
        input_size = params[2]
        self.output_id = params[3]

        self.depth = 2

        self.layers = nn.ModuleList()
        conv_layer = []
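        # Pre-activation ordering (BN -> ReLU -> conv), as in WideResNet, with
        # dropout between the two 3x3 convolutions.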
        conv_layer.append(nn.BatchNorm2d(in_channels))
        conv_layer.append(nn.ReLU(inplace=True))
        conv_layer.append(
            nn.Conv2d(in_channels,
                      channels,
                      kernel_size=3,
                      padding=1,
                      bias=True))
        conv_layer.append(nn.Dropout(p=dropout_rate))
        conv_layer.append(nn.BatchNorm2d(channels))
        conv_layer.append(nn.ReLU(inplace=True))
        conv_layer.append(
            nn.Conv2d(channels,
                      channels,
                      kernel_size=3,
                      stride=stride,
                      padding=1,
                      bias=True))
        self.layers.append(nn.Sequential(*conv_layer))

        shortcut = nn.Sequential()
        if stride != 1 or in_channels != channels:
            shortcut = nn.Sequential(
                nn.Conv2d(in_channels,
                          channels,
                          kernel_size=1,
                          stride=stride,
                          bias=True))

        self.layers.append(shortcut)

        if add_output:
            self.output = af.InternalClassifier(input_size, channels,
                                                num_classes)
            self.no_output = False
        else:
            self.output = None
            self.forward = self.only_forward
            self.no_output = True
    def __init__(self, in_channels, out_channels, params, stride=1):
        super(BlockWOutput, self).__init__()

        add_output = params[0]
        num_classes = params[1]
        input_size = params[2]
        self.output_id = params[3]

        self.depth = 2

        conv_layers = []
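        # Depthwise 3x3 convolution (groups=in_channels) followed by a 1x1
        # pointwise convolution: a MobileNet-style depthwise-separable block.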
        conv_layers.append(
            nn.Conv2d(in_channels,
                      in_channels,
                      kernel_size=3,
                      stride=stride,
                      padding=1,
                      groups=in_channels,
                      bias=False))
        conv_layers.append(nn.BatchNorm2d(in_channels))
        conv_layers.append(nn.ReLU())
        conv_layers.append(
            nn.Conv2d(in_channels,
                      out_channels,
                      kernel_size=1,
                      stride=1,
                      padding=0,
                      bias=False))
        conv_layers.append(nn.BatchNorm2d(out_channels))
        conv_layers.append(nn.ReLU())

        self.layers = nn.Sequential(*conv_layers)

        if add_output:
            self.output = af.InternalClassifier(input_size, out_channels,
                                                num_classes)
            self.no_output = False

        else:
            self.forward = self.only_forward
            self.output = nn.Sequential()
            self.no_output = True
Example #5
    def __init__(self, conv_params, output_params):
        super(ConvBlockWOutput, self).__init__()
        input_channels = conv_params[0]
        output_channels = conv_params[1]
        max_pool_size = conv_params[2]
        batch_norm = conv_params[3]

        add_output = output_params[0]
        num_classes = output_params[1]
        input_size = output_params[2]
        self.output_id = output_params[3]

        self.depth = 1

        conv_layers = []
        conv_layers.append(
            nn.Conv2d(in_channels=input_channels,
                      out_channels=output_channels,
                      kernel_size=3,
                      padding=1,
                      stride=1))

        if batch_norm:
            conv_layers.append(nn.BatchNorm2d(output_channels))

        conv_layers.append(nn.ReLU())

        if max_pool_size > 1:
            conv_layers.append(nn.MaxPool2d(kernel_size=max_pool_size))

        self.layers = nn.Sequential(*conv_layers)

        if add_output:
            self.output = af.InternalClassifier(input_size, output_channels,
                                                num_classes)
            self.no_output = False

        else:
            self.output = nn.Sequential()
            self.forward = self.only_forward
            self.no_output = True
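
# A construction sketch for ConvBlockWOutput showing how the two parameter
# tuples are laid out (indices follow the unpacking in __init__ above; the
# concrete values are illustrative, and add_output=True requires the
# af.InternalClassifier helper from the surrounding codebase):
conv_params = (3, 64, 2, True)     # (input_channels, output_channels, max_pool_size, batch_norm)
output_params = (True, 10, 32, 0)  # (add_output, num_classes, input_size, output_id)
block = ConvBlockWOutput(conv_params, output_params)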