Example #1
    def __init__(self, in_channels, out_channels, norm_layer):
        super(DANetHead, self).__init__()
        inter_channels = in_channels // 2  # 256 when in_channels == 512
        self.danet_conv5a = Sequential(
            Conv2d(in_channels, inter_channels, 3, padding=1, bias=False),
            norm_layer(inter_channels), ReLU())

        self.danet_conv5c = Sequential(
            Conv2d(in_channels, inter_channels, 3, padding=1, bias=False),
            norm_layer(inter_channels), ReLU())

        self.sa = PAM_Module(inter_channels)
        self.sc = CAM_Module(inter_channels)
        self.danet_conv51 = Sequential(
            Conv2d(inter_channels, inter_channels, 3, padding=1, bias=False),
            norm_layer(inter_channels), ReLU())
        self.danet_conv52 = Sequential(
            Conv2d(inter_channels, inter_channels, 3, padding=1, bias=False),
            norm_layer(inter_channels), ReLU())

        self.danet_conv6 = Sequential(Dropout2d(0.1, False),
                                      Conv2d(inter_channels, out_channels, 1))
        self.danet_conv7 = Sequential(Dropout2d(0.1, False),
                                      Conv2d(inter_channels, out_channels, 1))
        # 2 * inter_channels: assumes the two attention branches are concatenated
        self.danet_conv8 = Sequential(Dropout2d(0.1, False),
                                      Conv2d(2 * inter_channels, out_channels, 1))
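A plausible forward pass for this head (a sketch, not from the original source): the 2 * inter_channels input of danet_conv8 suggests the position-attention and channel-attention branches are concatenated rather than summed.

    def forward(self, x):
        # position-attention branch
        feat1 = self.danet_conv5a(x)
        sa_conv = self.danet_conv51(self.sa(feat1))
        sa_output = self.danet_conv6(sa_conv)
        # channel-attention branch
        feat2 = self.danet_conv5c(x)
        sc_conv = self.danet_conv52(self.sc(feat2))
        sc_output = self.danet_conv7(sc_conv)
        # assumed fusion by concatenation (matches danet_conv8's input width)
        feat_cat = torch.cat([sa_conv, sc_conv], dim=1)
        return self.danet_conv8(feat_cat), sa_output, sc_output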
Example #2
    def __init__(self,
                 cnn_channels: int,
                 cnn_dropout: float,
                 inner_kernel_size: Union[int, Tuple[int, int]],
                 inner_padding: Union[int, Tuple[int, int]]) \
            -> None:
        """Depthwise separable blocks.

        :param cnn_channels: Number of output CNN channels; the input\
                             to the first CNN in the block has one channel.
        :type cnn_channels: int
        :param cnn_dropout: Dropout to apply.
        :type cnn_dropout: float
        :param inner_kernel_size: Kernel shape to use.
        :type inner_kernel_size: (int, int)|int
        :param inner_padding: Padding to use.
        :type inner_padding: (int, int)|int
        """
        super().__init__()

        self.layer_1: Module = Sequential(
            DepthWiseSeparableConvBlock(in_channels=1,
                                        out_channels=cnn_channels,
                                        kernel_size=5,
                                        stride=1,
                                        padding=2,
                                        inner_kernel_size=inner_kernel_size,
                                        inner_padding=inner_padding), ReLU(),
            BatchNorm2d(cnn_channels),
            MaxPool2d(kernel_size=(1, 5), stride=(1, 5)),
            Dropout2d(cnn_dropout))

        self.layer_2: Module = Sequential(
            DepthWiseSeparableConvBlock(in_channels=cnn_channels,
                                        out_channels=cnn_channels,
                                        kernel_size=5,
                                        stride=1,
                                        padding=2,
                                        inner_kernel_size=inner_kernel_size,
                                        inner_padding=inner_padding), ReLU(),
            BatchNorm2d(cnn_channels),
            MaxPool2d(kernel_size=(1, 4), stride=(1, 4)),
            Dropout2d(cnn_dropout))

        self.layer_3: Module = Sequential(
            DepthWiseSeparableConvBlock(in_channels=cnn_channels,
                                        out_channels=cnn_channels,
                                        kernel_size=5,
                                        stride=1,
                                        padding=2,
                                        inner_kernel_size=inner_kernel_size,
                                        inner_padding=inner_padding), ReLU(),
            BatchNorm2d(cnn_channels),
            MaxPool2d(kernel_size=(1, 2), stride=(1, 2)),
            Dropout2d(cnn_dropout))

        self.layers: List[Module] = [self.layer_1, self.layer_2, self.layer_3]
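The blocks are collected in self.layers, so a forward pass is just sequential application; a minimal sketch:

    def forward(self, x):
        # x: (batch, 1, time, features); each block pools the last axis
        for layer in self.layers:
            x = layer(x)
        return x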
Example #3
    def __init__(self):
        super(Becca, self).__init__()
        self.name = "Becca"
        self.__version__ = "1.41"

        self.cnn_relu_block_1 = Sequential(
            # Conv Layer block 1 -- feature extraction
            Conv2d(3, 32, 3, 1),
            BatchNorm2d(32),
            LeakyReLU(inplace=True),
            Conv2d(32, 128, 3, 1),
            LeakyReLU(inplace=True),
            MaxPool2d(4, 3),
            Dropout2d(p=0.25),
        )

        self.cnn_relu_block_2 = Sequential(
            # Conv Layer block 2
            Conv2d(128, 256, 3, 1),
            BatchNorm2d(256),
            LeakyReLU(inplace=True),
            Dropout(p=0.4),
            Conv2d(256, 256, 4, 1),
            LeakyReLU(inplace=True),
            MaxPool2d(5, 3),
            Dropout2d(p=0.5),
        )

        self.cnn_relu_block_3 = Sequential(
            # Conv Layer block 3
            Conv2d(256, 384, 4, 1),
            BatchNorm2d(384),
            LeakyReLU(inplace=True),
            Dropout(p=0.5),
            Conv2d(384, 384, 4, 1),
            LeakyReLU(inplace=True),
            MaxPool2d(5, 3),
            Dropout2d(p=0.375),
        )

        self.cnn_flatten = Sequential(Flatten(), )
        self.cnn_relu_block_linear = Sequential(
            Dropout(p=0.2),
            Linear(4096, 1024),
            LeakyReLU(inplace=True),
            Dropout(p=0.5),
            Linear(1024, 128),
            LeakyReLU(inplace=True),
            Dropout(p=0.4),
            Linear(128, 10),
            # no softmax: CrossEntropyLoss expects raw logits
        )
Example #4
    def __init__(self, num_classes):
        super(UNetMini, self).__init__()

        self.block1 = Sequential(
            Conv2d(1, 32, kernel_size=3, padding=1),
            ReLU(),
            Dropout2d(0.2),
            Conv2d(32, 32, kernel_size=3, padding=1),
            ReLU(),
        )
        self.pool1 = MaxPool2d((2, 2))

        self.block2 = Sequential(
            Conv2d(32, 64, kernel_size=3, padding=1),
            ReLU(),
            Dropout2d(0.2),
            Conv2d(64, 64, kernel_size=3, padding=1),
            ReLU(),
        )
        self.pool2 = MaxPool2d((2, 2))

        self.block3 = Sequential(
            Conv2d(64, 128, kernel_size=3, padding=1),
            ReLU(),
            Dropout2d(0.2),
            Conv2d(128, 128, kernel_size=3, padding=1),
            ReLU()
        )

        self.up1 = UpsamplingNearest2d(scale_factor=2)
        self.block4 = Sequential(
            Conv2d(192, 64, kernel_size=3, padding=1),
            ReLU(),
            Dropout2d(0.2),
            Conv2d(64, 64, kernel_size=3, padding=1),
            ReLU()
        )

        self.up2 = UpsamplingNearest2d(scale_factor=2)
        self.block5 = Sequential(
            Conv2d(96, 32, kernel_size=3, padding=1),
            ReLU(),
            Dropout2d(0.2),
            Conv2d(32, 32, kernel_size=3, padding=1),
            ReLU()
        )

        self.interpolate = Interpolate(mode='bilinear')
        self.conv2d = Conv2d(32, num_classes, kernel_size=1)
        self.sigmoid = Sigmoid()
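The channel counts (192 = 128 + 64, 96 = 64 + 32) imply skip connections that concatenate the upsampled features with the matching encoder outputs; a plausible forward, assuming that wiring:

    def forward(self, x):
        out1 = self.block1(x)
        out2 = self.block2(self.pool1(out1))
        out3 = self.block3(self.pool2(out2))
        out4 = self.block4(torch.cat([self.up1(out3), out2], dim=1))  # 192 ch
        out5 = self.block5(torch.cat([self.up2(out4), out1], dim=1))  # 96 ch
        return self.sigmoid(self.conv2d(self.interpolate(out5)))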
Example #5
    def __init__(self,
                 cnn_channels,
                 inner_kernel_size,
                 inner_padding,
                 cnn_dropout,
                 pool_size=5):
        """The CNN dec of the Masker.

        :param cnn_channels: The amount of CNN channels used in the blocks
        :type cnn_channels: int
        :param inner_kernel_size: Size of the kernel used for the inner convolution
        :type inner_kernel_size: int
        :param inner_padding: Padding size for the inner convolution
        :type inner_padding: int
        :param cnn_dropout: Dropout rate for the convolutions
        :type cnn_dropout: float
        :param pool_size: Amount of features pooled in MaxPool2d
        :type pool_size: int
        """
        super(CNNDec, self).__init__()
        self.layer_1: Module = Sequential(
            DepthWiseSeparableConvBlock(in_channels=cnn_channels,
                                        out_channels=cnn_channels,
                                        kernel_size=5,
                                        stride=1,
                                        padding=2,
                                        inner_kernel_size=inner_kernel_size,
                                        inner_padding=inner_padding), ReLU(),
            BatchNorm2d(cnn_channels),
            MaxPool2d(kernel_size=(1, pool_size), stride=(1, pool_size)),
            Dropout2d(cnn_dropout))
Example #6
    def __init__(self,
                 in_channels: int,
                 out_channels: int,
                 bottleneck_ratio: Optional[int] = None,
                 dropout: float = 0.0):
        super(DenseLayer, self).__init__()

        self.in_channels = in_channels
        self.out_channels = out_channels

        self.add_module('norm', BatchNorm2d(num_features=in_channels))
        self.add_module('relu', ReLU(inplace=True))

        if bottleneck_ratio is not None:
            self.add_module(
                'bottleneck',
                Bottleneck(in_channels, bottleneck_ratio * out_channels))
            in_channels = bottleneck_ratio * out_channels

        self.add_module(
            'conv',
            Conv2d(in_channels,
                   out_channels,
                   kernel_size=3,
                   padding=1,
                   bias=False))

        if dropout > 0:
            self.add_module('drop', Dropout2d(dropout, inplace=True))
Example #7
 def __init__(self,
              inplanes: int,
              planes: int,
              stride: int = 1,
              downsample: Optional[nn.Module] = None,
              groups: int = 1,
              base_width: int = 64,
              dilation: int = 1,
              norm_layer: Optional[Callable[..., nn.Module]] = None,
              dropout_rate: float = 0.1) -> None:
     super(BasicBlock, self).__init__()
     if norm_layer is None:
         norm_layer = nn.BatchNorm2d
     if groups != 1 or base_width != 64:
         raise ValueError(
             'BasicBlock only supports groups=1 and base_width=64')
     if dilation > 1:
         raise NotImplementedError(
             'Dilation > 1 not supported in BasicBlock')
     # Both self.conv1 and self.downsample layers downsample the input when
     # stride != 1.
     self.conv1 = conv3x3(inplanes, planes, stride)
     self.bn1 = norm_layer(planes)
     self.relu = nn.ReLU(inplace=True)
     self.dropout = Dropout2d(p=dropout_rate, inplace=False)
     self.conv2 = conv3x3(planes, planes)
     self.bn2 = norm_layer(planes)
     self.downsample = downsample
     self.stride = stride
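A plausible forward in the torchvision BasicBlock style; the placement of the added Dropout2d between the two convolutions is an assumption, since the original forward is not shown:

 def forward(self, x):
     identity = x
     out = self.relu(self.bn1(self.conv1(x)))
     out = self.dropout(out)  # assumed placement of the extra Dropout2d
     out = self.bn2(self.conv2(out))
     if self.downsample is not None:
         identity = self.downsample(x)
     return self.relu(out + identity)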
Example #8
 def __init__(self,
              inplanes: int,
              planes: int,
              stride: int = 1,
              downsample: Optional[nn.Module] = None,
              groups: int = 1,
              base_width: int = 64,
              dilation: int = 1,
              norm_layer: Optional[Callable[..., nn.Module]] = None,
              dropout_rate: float = 0.1) -> None:
     super(Bottleneck, self).__init__()
     if norm_layer is None:
         norm_layer = nn.BatchNorm2d
     width = int(planes * (base_width / 64.)) * groups
     # Both self.conv2 and self.downsample layers downsample the input when
     # stride != 1.
     self.conv1 = conv1x1(inplanes, width)
     self.bn1 = norm_layer(width)
     self.conv2 = conv3x3(width, width, stride, groups, dilation)
     self.bn2 = norm_layer(width)
     self.conv3 = conv1x1(width, planes * self.expansion)
     self.bn3 = norm_layer(planes * self.expansion)
     self.relu = nn.ReLU(inplace=True)
     self.dropout = Dropout2d(p=dropout_rate, inplace=False)
     self.downsample = downsample
     self.stride = stride
Example #9
    def __init__(self,
                 in_channels: int,
                 compression: float = 1.0,
                 dropout: float = 0.0):
        super(TransitionDown, self).__init__()

        if not 0.0 < compression <= 1.0:
            raise ValueError(
                f'Compression must be in (0, 1] range, got {compression}')

        self.in_channels = in_channels
        self.dropout = dropout
        self.compression = compression
        self.out_channels = int(ceil(compression * in_channels))

        self.add_module('norm', BatchNorm2d(num_features=in_channels))
        self.add_module('relu', ReLU(inplace=True))
        self.add_module(
            'conv',
            Conv2d(in_channels, self.out_channels, kernel_size=1, bias=False))

        if dropout > 0:
            self.add_module('drop', Dropout2d(dropout))

        self.add_module('pool', MaxPool2d(kernel_size=2, stride=2))
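Hypothetical usage, assuming TransitionDown subclasses Sequential so the modules registered with add_module run in insertion order:

    td = TransitionDown(in_channels=64, compression=0.5, dropout=0.1)
    y = td(torch.randn(2, 64, 32, 32))
    # y.shape == (2, 32, 16, 16): the 1x1 conv halves the channels
    # (ceil(0.5 * 64) = 32) and the 2x2 max pool halves each spatial side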
Example #10
    def __init__(self, input_shape):
        '''
        @param input_shape: (c, h, w); index 0 is the channel count,
        index 1 the spatial size (50 or 30)
        '''
        super(SimeseNet, self).__init__()
        self.input_shape = input_shape

        self.cnn = torch.nn.Sequential(
            Conv_BN_RELU(input_shape[0], 32, 3, 2, 1),  # (b, 32, 46, 46)
            # Dropout2d(0.2),  # dropout disabled
            Conv_BN_RELU(32, 32, 3, 2, 1),
            MaxPool2d((2, 2), 1),
            # Dropout2d(0.2),  # dropout disabled
            Conv_BN_RELU(32, 64, 3, 2, 1),
            # Dropout2d(0.2),  # dropout disabled
            Conv_BN_RELU(64, 64, 3, 2, 1),
            # Dropout2d(0.2),  # dropout disabled
            MaxPool2d((2, 2), 1)
        )
        if input_shape[1] == 50:
            linear1 = Linear(in_features=64 * 2 * 2, out_features=512)
        elif input_shape[1] == 30:
            linear1 = Linear(in_features=64, out_features=512)
        else:
            raise ValueError(
                'unsupported spatial size: {}'.format(input_shape[1]))
        self.fc = Sequential(
            linear1,
            ReLU(inplace=True),
            Dropout2d(.4),  # on a 2-D tensor; plain Dropout is the usual choice here
            Linear(in_features=512, out_features=2)
        )
Example #11
    def __init__(self):
        super(OCRModel, self).__init__()
        self.conv1 = Conv2d(1, 32, (3, 3), padding=1, bias=False)
        self.conv2 = Conv2d(32, 32, (3, 3), padding=1, bias=False)
        self.conv3 = Conv2d(32, 64, (3, 3), padding=1, bias=False)
        self.conv4 = Conv2d(64, 64, (3, 3), padding=1, bias=False)
        self.conv5 = Conv2d(64, 128, (3, 3), padding=1, bias=False)
        self.conv6 = Conv2d(128, 128, (3, 3), padding=1, bias=False)
        self.conv7 = Conv2d(128, 256, (3, 3), padding=1, bias=False)
        self.conv8 = Conv2d(256, 256, (3, 3), padding=1, bias=False)
        self.conv9 = Conv2d(256, 512, (2, 3), padding=(0, 1), bias=False)
        self.conv10 = Conv2d(512, 512, (1, 5), padding=(0, 2), bias=False)
        self.conv11 = Conv2d(512, 94, (1, 1), padding=(0, 0))

        self.conv_attenton = Conv2d(512, 1, (1, 1), padding=0)

        self.batch1 = InstanceNorm2d(32, eps=1e-05, momentum=0.1, affine=True)
        self.batch2 = InstanceNorm2d(32, eps=1e-05, momentum=0.1, affine=True)
        self.batch3 = InstanceNorm2d(64, eps=1e-05, momentum=0.1, affine=True)
        self.batch5 = InstanceNorm2d(128, eps=1e-05, momentum=0.1, affine=True)
        self.batch7 = InstanceNorm2d(256, eps=1e-05, momentum=0.1, affine=True)
        self.batch8 = InstanceNorm2d(256, eps=1e-05, momentum=0.1, affine=True)
        self.batch9 = InstanceNorm2d(512, eps=1e-05, momentum=0.1, affine=True)
        self.batch10 = InstanceNorm2d(512,
                                      eps=1e-05,
                                      momentum=0.1,
                                      affine=True)
        self.drop1 = Dropout2d(p=0.2, inplace=False)
        self.leaky = LeakyReLU(negative_slope=0.01, inplace=False)
        self.max1 = MaxPool2d((2, 2), stride=None)
        self.max2 = MaxPool2d((2, 1), stride=(2, 1))
Example #12
 def __init__(self):
     super().__init__()
     self.conv1 = Conv2d(1, 10, kernel_size=5)
     self.conv2 = Conv2d(10, 20, kernel_size=5)
     self.conv2_drop = Dropout2d()
     self.fc1 = Linear(320, 50)
     self.fc2 = Linear(50, 10)
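The layer sizes match the classic PyTorch MNIST example, so a forward pass along these lines (a sketch, assuming torch.nn.functional is imported as F; not taken from this snippet) would be:

 def forward(self, x):
     x = F.relu(F.max_pool2d(self.conv1(x), 2))
     x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
     x = x.view(-1, 320)  # 20 channels * 4 * 4 for a 28x28 MNIST input
     x = F.relu(self.fc1(x))
     x = F.dropout(x, training=self.training)
     return F.log_softmax(self.fc2(x), dim=1)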
Example #13
 def __init__(self, weights=None):
     super(Model, self).__init__()
     self.conv1 = Conv2d(1, 10, kernel_size=5)
     self.conv2 = Conv2d(10, 20, kernel_size=5)
     self.drop = Dropout2d()
     self.fc1 = Linear(320, 50)
     self.fc2 = Linear(50, 10)
     self.reset_weights(weights)
Example #14
    def __init__(self, input_isize=(60, 60, 3), predict_mode=False):
        torch.nn.Module.__init__(self)
        TrainableModule.__init__(self, predict_mode=predict_mode)

        self.input_isize = input_isize
        self.n_hidden = 256
        self.conv1 = Conv2d(in_channels=input_isize[2],
                            out_channels=32,
                            kernel_size=3,
                            stride=1)
        self.max_pool1 = MaxPool2d(kernel_size=3, stride=2)

        self.conv2 = Conv2d(in_channels=32,
                            out_channels=32,
                            kernel_size=3,
                            stride=1)
        self.max_pool2 = MaxPool2d(kernel_size=3, stride=2)
        self.bn1 = BatchNorm2d(32,
                               eps=1e-3,
                               momentum=0.99,
                               track_running_stats=False)
        # track_running_stats=False: batch statistics are used in eval mode too
        self.dropout1 = Dropout2d(0.5)

        self.conv3 = Conv2d(in_channels=32,
                            out_channels=64,
                            kernel_size=3,
                            stride=1)
        self.max_pool3 = MaxPool2d(kernel_size=3, stride=2)

        self.conv4 = Conv2d(
            in_channels=64,
            out_channels=64,
            kernel_size=3,
            stride=1,
        )
        self.max_pool4 = MaxPool2d(kernel_size=3, stride=2)

        self.bn2 = BatchNorm2d(64,
                               eps=1e-3,
                               momentum=0.99,
                               track_running_stats=False)
        self.dropout2 = Dropout2d(0.5)
        self.fc1 = Linear(in_features=256, out_features=self.n_hidden)
        self.fc2 = Linear(in_features=self.n_hidden, out_features=20)
Example #15
 def __init__(self):
     super(Net, self).__init__()
     self.conv1 = Conv2d(in_channels=1, out_channels=32,
                         kernel_size=3, stride=1)
     self.conv2 = Conv2d(in_channels=32, out_channels=64,
                         kernel_size=3, stride=1)
     self.dropout1 = Dropout2d(p=0.5)
     self.fc1 = Linear(in_features=12544, out_features=128)
     self.fc2 = Linear(in_features=128, out_features=40)
Example #16
    def __init__(self, input_channels=1, img_h=766, img_w=1366, num_classes=4):
        super(FeatureNet_v1, self).__init__()
        self.input_channels = input_channels

        self.img_h = img_h
        self.img_w = img_w

        self.num_classes = num_classes

        self.conv_1 = Conv2d(in_channels=self.input_channels,
                             out_channels=8,
                             kernel_size=3,
                             stride=1,
                             padding=0)

        dim_c1_w = compute_conv_dim(self.img_w, 3, 0, 1)
        dim_c1_h = compute_conv_dim(self.img_h, 3, 0, 1)

        self.conv_2 = Conv2d(in_channels=8,
                             out_channels=16,
                             kernel_size=3,
                             stride=1,
                             padding=0)

        dim_c2_w = compute_conv_dim(dim_c1_w, 3, 0, 1)
        dim_c2_h = compute_conv_dim(dim_c1_h, 3, 0, 1)

        self.pool_1 = MaxPool2d(5, stride=2)

        dim_p1_w = compute_conv_dim(dim_c2_w, 5, 0, 2)
        dim_p1_h = compute_conv_dim(dim_c2_h, 5, 0, 2)

        self.conv_3 = Conv2d(in_channels=16,
                             out_channels=32,
                             kernel_size=5,
                             stride=1,
                             padding=0)

        dim_c3_w = compute_conv_dim(dim_p1_w, 5, 0, 1)
        dim_c3_h = compute_conv_dim(dim_p1_h, 5, 0, 1)

        self.pool_2 = AvgPool2d(5, stride=3)

        dim_p2_w = compute_conv_dim(dim_c3_w, 5, 0, 3)
        dim_p2_h = compute_conv_dim(dim_c3_h, 5, 0, 3)

        self.dropout = Dropout2d(p=0.5)

        # input features for the first linear layer = dim_p2_w * dim_p2_h * 32
        self.l1_in_features = dim_p2_w * dim_p2_h * 32

        self.l_1 = Linear(in_features=self.l1_in_features, out_features=128)

        self.l_out = Linear(in_features=128,
                            out_features=num_classes,
                            bias=False)
Example #17
    def __init__(self,
                 cnn_channels: int,
                 cnn_dropout: float) \
            -> None:
        """Baseline CNN blocks.

        :param cnn_channels: CNN channels.
        :type cnn_channels: int
        :param cnn_dropout: Dropout to apply.
        :type cnn_dropout: float
        """
        super().__init__()

        self.layer_1: Module = Sequential(
            Conv2d(in_channels=1,
                   out_channels=cnn_channels,
                   kernel_size=5,
                   stride=1,
                   padding=2), ReLU(), BatchNorm2d(cnn_channels),
            MaxPool2d(kernel_size=(1, 5), stride=(1, 5)),
            Dropout2d(cnn_dropout))

        self.layer_2: Module = Sequential(
            Conv2d(in_channels=cnn_channels,
                   out_channels=cnn_channels,
                   kernel_size=5,
                   stride=1,
                   padding=2), ReLU(), BatchNorm2d(cnn_channels),
            MaxPool2d(kernel_size=(1, 4), stride=(1, 4)),
            Dropout2d(cnn_dropout))

        self.layer_3: Module = Sequential(
            Conv2d(in_channels=cnn_channels,
                   out_channels=cnn_channels,
                   kernel_size=5,
                   stride=1,
                   padding=2), ReLU(), BatchNorm2d(cnn_channels),
            MaxPool2d(kernel_size=(1, 2), stride=(1, 2)),
            Dropout2d(cnn_dropout))

        self.layers: List[Module] = [self.layer_1, self.layer_2, self.layer_3]
Example #18
    def __init__(self, num_classes):
        super(UNetMini, self).__init__()

        # Use padding 1 to mimic `padding='same'` in keras,
        # use this visualization tool https://ezyang.github.io/convolution-visualizer/index.html
        self.block1 = Sequential(
            Conv2d(1, 32, kernel_size=3, padding=1),
            ReLU(),
            Dropout2d(0.2),
            Conv2d(32, 32, kernel_size=3, padding=1),
            ReLU(),
        )
        self.pool1 = MaxPool2d((2, 2))

        self.block2 = Sequential(
            Conv2d(32, 64, kernel_size=3, padding=1),
            ReLU(),
            Dropout2d(0.2),
            Conv2d(64, 64, kernel_size=3, padding=1),
            ReLU(),
        )
        self.pool2 = MaxPool2d((2, 2))

        self.block3 = Sequential(Conv2d(64, 128, kernel_size=3, padding=1),
                                 ReLU(), Dropout2d(0.2),
                                 Conv2d(128, 128, kernel_size=3, padding=1),
                                 ReLU())

        self.up1 = UpsamplingNearest2d(scale_factor=2)
        self.block4 = Sequential(Conv2d(192, 64, kernel_size=3, padding=1),
                                 ReLU(), Dropout2d(0.2),
                                 Conv2d(64, 64, kernel_size=3, padding=1),
                                 ReLU())

        self.up2 = UpsamplingNearest2d(scale_factor=2)
        self.block5 = Sequential(Conv2d(96, 32, kernel_size=3, padding=1),
                                 ReLU(), Dropout2d(0.2),
                                 Conv2d(32, 32, kernel_size=3, padding=1),
                                 ReLU())

        self.conv2d = Conv2d(32, num_classes, kernel_size=1)
Example #19
 def __init__(self, n_class=33, ns=0.2, dp=0.1, seg_len=128):
     super(PatchDiscriminator, self).__init__()
     self.ns = ns
     self.conv1 = Conv2d(1, 64, kernel_size=5, stride=2)
     self.conv2 = Conv2d(64, 128, kernel_size=5, stride=2)
     self.conv3 = Conv2d(128, 256, kernel_size=5, stride=2)
     self.conv4 = Conv2d(256, 512, kernel_size=5, stride=2)
     self.conv5 = Conv2d(512, 512, kernel_size=5, stride=2)
     self.conv6 = Conv2d(512, 32, kernel_size=1)
     if seg_len == 128:
         self.conv7 = Conv2d(32, 1, kernel_size=(17, 4))
         self.conv_classify = Conv2d(32, n_class, kernel_size=(17, 4))
     elif seg_len == 64:
         self.conv7 = Conv2d(32, 1, kernel_size=(17, 2))
         self.conv_classify = Conv2d(32, n_class, kernel_size=(17, 2))
      else:
          raise NotImplementedError(
              "Segment length {} is not supported!".format(seg_len))
     self.drop1 = Dropout2d(p=dp)
     self.drop2 = Dropout2d(p=dp)
     self.drop3 = Dropout2d(p=dp)
     self.drop4 = Dropout2d(p=dp)
     self.drop5 = Dropout2d(p=dp)
     self.drop6 = Dropout2d(p=dp)
     self.ins_norm1 = InstanceNorm2d(self.conv1.out_channels)
     self.ins_norm2 = InstanceNorm2d(self.conv2.out_channels)
     self.ins_norm3 = InstanceNorm2d(self.conv3.out_channels)
     self.ins_norm4 = InstanceNorm2d(self.conv4.out_channels)
     self.ins_norm5 = InstanceNorm2d(self.conv5.out_channels)
     self.ins_norm6 = InstanceNorm2d(self.conv6.out_channels)
Example #20
    def __init__(self, nclass):
        super(Net_Period_Classification, self).__init__()

        self.conv1a = conv_bn(1, 32, 3, 1, 2, 2, True)
        self.drop1a = Dropout2d(.2, True)
        self.conv1b = conv_bn(32, 32, 3, 1, 2, 2, True)
        self.drop1b = Dropout2d(.2, True)
        # pool
        # (32, 112, 112)
        self.conv2 = inception_v2(32, 64)
        self.drop2 = Dropout2d(.4, True)
        # pool
        # (192, 56, 56)
        self.conv3 = inception_v2(192, 64)
        self.drop3 = Dropout2d(.4, True)
        # pool
        # (192, 28, 28)
        self.conv4 = inception_v2(192, 64)
        self.drop4 = Dropout2d(.4, True)
        # pool
        # (192, 14, 14)
        self.conv5 = Conv2d(192, 16, 1, 1, 0, 1)
        self.drop5 = Dropout2d(.4, True)
        # (16, 14, 14)
        # pool
        # (16, 7, 7)
        self.mlp1 = Linear(16 * 7 * 7, 64)
        self.drop01 = Dropout(.2, True)
        self.mlp2 = Linear(64, 64)
        self.drop02 = Dropout(.2, True)
        self.mlp3 = Linear(64, nclass)
Example #21
    def __init__(self,
                 num_classes,
                 num_channels=3,
                 encoder_name='resnet34',
                 use_last_decoder=False):
        if not hasattr(self, 'first_layer_stride_two'):
            self.first_layer_stride_two = False
        if not hasattr(self, 'decoder_block'):
            self.decoder_block = UnetDecoderBlock
        if not hasattr(self, 'bottleneck_type'):
            self.bottleneck_type = ConvBottleneck

        self.filters = encoder_params[encoder_name]['filters']
        self.decoder_filters = encoder_params[encoder_name].get(
            'decoder_filters', self.filters[:-1])
        self.last_upsample_filters = encoder_params[encoder_name].get(
            'last_upsample', self.decoder_filters[0] // 2)

        super().__init__()

        self.num_channels = num_channels
        self.num_classes = num_classes
        self.bottlenecks = nn.ModuleList([
            self.bottleneck_type(self.filters[-i - 2] + f, f)
            for i, f in enumerate(reversed(self.decoder_filters[:]))
        ])

        self.decoder_stages = nn.ModuleList([
            self.get_decoder(idx)
            for idx in range(0, len(self.decoder_filters))
        ])

        if self.first_layer_stride_two:
            if use_last_decoder:
                self.last_upsample = self.decoder_block(
                    self.decoder_filters[0], self.last_upsample_filters,
                    self.last_upsample_filters)
            else:
                self.last_upsample = UpsamplingBilinear2d(scale_factor=2)
        self.final = self.make_final_classifier(
            self.last_upsample_filters if self.first_layer_stride_two else
            self.decoder_filters[0], num_classes)
        self._initialize_weights()
        self.dropout = Dropout2d(p=0.0)
        encoder = encoder_params[encoder_name]['init_op']()
        self.encoder_stages = nn.ModuleList([
            self.get_encoder(encoder, idx) for idx in range(len(self.filters))
        ])
        if encoder_params[encoder_name]['url'] is not None:
            self.initialize_encoder(encoder,
                                    encoder_params[encoder_name]['url'],
                                    num_channels != 3)
Example #22
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: tuple,
        stride: int,
        padding: int,
        dropout_p: float,
    ):
        """Initializes a Block of convolutional layers.

        Parameters
        ----------
        in_channels : int
            Number of input channels to a convolutional block.
        out_channels : int
            Number of output channels to a convolutional block.
        kernel_size : tuple
            Size of a kernel in a convolutional layer.
        stride : int
            Stride used in a convolutional layer.
        padding : int
            Number of padded pixels in a convolutional layer.
        dropout_p : float
            Probability of an element to be zeroed.
        """
        super(ConvBlock, self).__init__()

        self.layers = Sequential(
            Conv2d(in_channels, out_channels, kernel_size, stride, padding),
            PReLU(),
            BatchNorm2d(out_channels),
            Dropout2d(dropout_p),
            Conv2d(out_channels, out_channels, kernel_size, stride, padding),
            PReLU(),
            BatchNorm2d(out_channels),
            Dropout2d(dropout_p),
        )
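Hypothetical usage of ConvBlock (assuming its forward simply applies self.layers): with kernel_size=(3, 3), stride=1, and padding=1 the block preserves spatial size.

    block = ConvBlock(in_channels=1, out_channels=16, kernel_size=(3, 3),
                      stride=1, padding=1, dropout_p=0.1)
    y = block.layers(torch.randn(8, 1, 64, 64))
    # y.shape == (8, 16, 64, 64)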
Example #23
 def _init_layers(self):
     layers = [
         Conv2d(in_channels=3,
                out_channels=32,
                kernel_size=3,
                stride=2,
                padding=1),
         ReLU(),
         Conv2d(in_channels=32,
                out_channels=64,
                kernel_size=3,
                stride=2,
                padding=1),
         ReLU(),
         Conv2d(in_channels=64,
                out_channels=128,
                kernel_size=3,
                stride=2,
                padding=1),
         BatchNorm2d(128),
         ReLU(),
         Conv2d(in_channels=128,
                out_channels=256,
                kernel_size=3,
                stride=2,
                padding=1),
         ReLU(),
         Conv2d(in_channels=256,
                out_channels=512,
                kernel_size=3,
                stride=1,
                padding=1),
         BatchNorm2d(512),
         ReLU(),
         Conv2d(in_channels=512,
                out_channels=1024,
                kernel_size=3,
                stride=1,
                padding=1),
         BatchNorm2d(1024),
         ReLU(),
         Dropout2d(p=0.25),
         Flatten(),
         Dropout(p=0.4),
         Linear(9216, int(9216 / 9)),
         ReLU(),
         Dropout(p=0.4),
         BayesianLayer(int(9216 / 9), self.latent_size)
     ]
     return layers
Example #24
 def __init__(self, decoder_filters, filters, upsample_filters=None,
              decoder_block=DecoderBlock, bottleneck=ConcatBottleneck, dropout=0):
     super().__init__()
     self.decoder_filters = decoder_filters
     self.filters = filters
     self.decoder_block = decoder_block
     self.decoder_stages = nn.ModuleList([self._get_decoder(idx) for idx in range(0, len(decoder_filters))])
     self.bottlenecks = nn.ModuleList([bottleneck(self.filters[-i - 2] + f, f)
                                       for i, f in enumerate(reversed(decoder_filters))])
     self.dropout = Dropout2d(dropout) if dropout > 0 else None
     self.last_block = None
     if upsample_filters:
         self.last_block = decoder_block(decoder_filters[0], out_channels=upsample_filters)
     else:
         self.last_block = UpsamplingBilinear2d(scale_factor=2)
Example #25
    def __init__(self):
        super(Net, self).__init__()

        self.cnn_layers = Sequential(
            # Defining a 2D convolution layer
            Conv2d(1, 64, kernel_size=10, stride=1, padding=0),
            ReLU(inplace=True),
            BatchNorm2d(64),
            MaxPool2d(kernel_size=2, stride=2),

            Conv2d(64, 128, kernel_size=3, stride=1, padding=2),
            ReLU(inplace=True),
            MaxPool2d(kernel_size=2, stride=2),
            Dropout2d(0.10),
            
            Conv2d(128, 128, kernel_size=2, stride=1, padding=2),
            ReLU(inplace=True),
            MaxPool2d(kernel_size=2, stride=2),         
            Dropout2d(0.20)            
        )

        self.linear_layers = Sequential(
            Linear(32768, 3)  # 3-class output; softmax is applied in the loss
        )
Example #26
    def __init__(self, num_classes, input_channels=3):
        super(UNet, self).__init__()

        self._num_classes = num_classes
        self._input_channels = input_channels

        self.network_channels = ((self._input_channels, 64), (64, 128),
                                 (128, 256), (256, 512), (512, 1024, 512),
                                 (1024, 512, 256), (512, 256, 128),
                                 (256, 128, 64), (128, 64, 64),
                                 (64, self._num_classes))

        self.enc1 = _EncoderBlock(*self.network_channels[0])
        self.enc2 = _EncoderBlock(*self.network_channels[1])
        self.enc3 = _EncoderBlock(*self.network_channels[2])
        self.enc4 = _EncoderBlock(*self.network_channels[3])

        self.center = self._central_part(*self.network_channels[4])

        self.dec4 = _DecoderBlock(*self.network_channels[5])
        self.dec3 = _DecoderBlock(*self.network_channels[6])
        self.dec2 = _DecoderBlock(*self.network_channels[7])
        layers = [
            Conv2d(self.network_channels[8][0],
                   self.network_channels[8][1],
                   kernel_size=3,
                   padding=1),
            BatchNorm2d(self.network_channels[8][1]),
            ReLU(inplace=True),
            Conv2d(self.network_channels[8][1],
                   self.network_channels[8][1],
                   kernel_size=3,
                   padding=1),
            Dropout2d(0.2),
            Conv2d(self.network_channels[8][2],
                   self._num_classes,
                   kernel_size=3,
                   padding=1)
        ]

        self.out = Sequential(*layers)

        self.pool1 = MaxPool2d(kernel_size=2, stride=2)
        self.pool2 = MaxPool2d(kernel_size=2, stride=2)
        self.pool3 = MaxPool2d(kernel_size=2, stride=2)
        self.pool4 = MaxPool2d(kernel_size=2, stride=2)

        self._initialize_weights()
Example #27
    def __init__(self,
                 embed: nn.Module,
                 rnn_size: int,
                 rnn_layers: int,
                 rnn_dropout: float,
                 inp_dropout: float = 0.0,
                 rnn_bidirectional: bool = False,
                 rnn_layer_norm: bool = False,
                 rnn_type: str = "LSTM",
                 feat_size: int = 0,
                 countdown: bool = False,
                 **kwargs):
        super(RNNEncoder, self).__init__()

        assert rnn_type in ["LSTM", "GRU"]

        self.rnn_layer_norm = rnn_layer_norm
        self.rnn_type = rnn_type
        self.feat_size = feat_size
        self.countdown = countdown

        self.embed = embed
        self.inp_dropout = Dropout2d(inp_dropout)

        input_size = embed.embedding_dim + feat_size

        if self.countdown:
            self.Wt = nn.Parameter(torch.rand(1))
            input_size += 1

        rnn = nn.GRU if self.rnn_type == "GRU" else nn.LSTM
        self.rnn = rnn(input_size=input_size,
                       hidden_size=rnn_size,
                       num_layers=rnn_layers,
                       dropout=rnn_dropout if rnn_layers > 1 else 0.,
                       bidirectional=rnn_bidirectional,
                       batch_first=True)

        # define output feature size
        self.hidden_size = rnn_size
        if rnn_bidirectional:
            self.hidden_size *= 2

        if self.rnn_layer_norm:
            self.norm_rnn = nn.LayerNorm(self.rnn.hidden_size, eps=1e-6)

        self.init_weights()
Example #28
        def __init__(self):
            super(Net, self).__init__()

            self.conv_1 = Conv2d(in_channels=channels,
                                 out_channels=conv_out_channels,
                                 kernel_size=kernel_size,
                                 stride=conv_stride,
                                 padding=conv_pad)

            self.batch1 = BatchNorm2d(conv_out_channels)

            if layers > 1:
                self.conv_2 = Conv2d(in_channels=conv_out_channels,
                                     out_channels=conv_out_channels * 2,
                                     kernel_size=kernel_size,
                                     stride=conv_stride,
                                     padding=conv_pad)

                self.batch2 = BatchNorm2d(conv_out_channels * 2)

            if layers > 2:
                self.conv_3 = Conv2d(in_channels=conv_out_channels * 2,
                                     out_channels=conv_out_channels * 4,
                                     kernel_size=kernel_size,
                                     stride=conv_stride,
                                     padding=conv_pad)

                self.batch3 = BatchNorm2d(conv_out_channels * 4)

            if layers > 3:
                self.conv_4 = Conv2d(in_channels=conv_out_channels * 4,
                                     out_channels=conv_out_channels * 8,
                                     kernel_size=kernel_size,
                                     stride=conv_stride,
                                     padding=conv_pad)

                self.batch4 = BatchNorm2d(conv_out_channels * 8)

            self.pool = nn.MaxPool2d(maxpool, maxpool)

            self.dropout = Dropout2d(p=dropout)

            self.l_out = Linear(in_features=features_cat_size,
                                out_features=2,
                                bias=False)
Example #29
    def __init__(self):
        super(Net, self).__init__()

        self.conv_1 = Conv2d(in_channels=channels,
                             out_channels=conv_out_channels,
                             kernel_size=kernel_size,
                             stride=conv_stride,
                             padding=conv_pad,
                             bias=True)

        self.lstm = ConvLSTM(conv_out_channels, lstm_out_channels, (3, 3), 1,
                             True, True, False)

        self.pool = nn.AvgPool3d(2, 2, 0)

        self.l_sample = Linear(in_features=features_cat_size,
                               out_features=11 * 11,
                               bias=True)

        self.conv_2 = Conv2d(in_channels=4,
                             out_channels=conv_out_channels * 4,
                             kernel_size=kernel_size,
                             stride=conv_stride,
                             padding=conv_pad,
                             bias=True)

        self.conv_3 = Conv2d(in_channels=conv_out_channels * 4,
                             out_channels=conv_out_channels * 4,
                             kernel_size=kernel_size,
                             stride=conv_stride,
                             padding=conv_pad,
                             bias=True)

        self.dropout = Dropout2d(p=0.5)

        self.batch1 = BatchNorm2d(conv_out_channels)
        self.batch2 = BatchNorm2d(conv_out_channels * 2)

        self.fc = Linear(in_features=features_cat_size2,
                         out_features=100,
                         bias=True)

        self.l_out = Linear(in_features=100,
                            out_features=NUM_CLASSES,
                            bias=False)
Example #30
    def __init__(self):
        super(Zijun, self).__init__()
        self.name = "Zijun"
        self.__version__ = "1.1"
        self.cnn_relu_stack = Sequential(

            # Conv Layer block 1
            Conv2d(3, 32, 3, 1),
            BatchNorm2d(32),
            LeakyReLU(inplace=True),
            Conv2d(32, 128, 3, 1),  # 64
            LeakyReLU(inplace=True),
            MaxPool2d(2, 2),
            Dropout2d(p=0.16),

            # Conv Layer block 2
            Conv2d(128, 192, 3, 1),
            BatchNorm2d(192),
            LeakyReLU(inplace=True),
            Dropout2d(p=0.16),
            Conv2d(192, 384, 3, 1),
            LeakyReLU(inplace=True),
            MaxPool2d(2, 2),
            Dropout2d(p=0.2),
            # Dropout(p=0.2),

            # Conv Layer block 3
            Conv2d(384, 512, 3, 1),
            BatchNorm2d(512),
            LeakyReLU(inplace=True),
            Dropout2d(p=0.2),
            Conv2d(512, 1024, 3, 1),
            LeakyReLU(inplace=True),
            BatchNorm2d(1024),
            Dropout2d(p=0.16),
            Conv2d(1024, 1024, 3, 1),
            LeakyReLU(inplace=True),
            MaxPool2d(3, 2),
            Dropout2d(p=0.16),
            Flatten(),
            Dropout(p=0.16),
            Linear(9216, 4096),
            LeakyReLU(inplace=True),
            Linear(4096, 2048),
            LeakyReLU(inplace=True),
            Dropout(p=0.1),
            Linear(2048, 512),
            LeakyReLU(inplace=True),
            Dropout(p=0.16),
            Linear(512, 10),
            # no softmax: CrossEntropyLoss expects raw logits
        )