def __init__(self, n_inp, n_out, se=False, se_ratio=16):
    """Build a three-scale conv block with a (possibly projected) skip path.

    The 3x3 stages split the output channels as roughly
    n_out/2 + n_out/4 + remainder, so their widths sum to exactly n_out.

    :param n_inp: number of input channels.
    :param n_out: number of output channels.
    :param se: whether to attach a squeeze-and-excitation block.
    :param se_ratio: reduction ratio for the SE block.
    """
    super().__init__()
    half = n_out // 2
    quarter = n_out // 4
    # Remainder absorbs integer-division rounding so the three scales
    # concatenate to exactly n_out channels.
    rest = n_out - quarter - half
    self.scale1 = conv_block_3x3(n_inp, half, 'relu')
    self.scale2 = conv_block_3x3(half, quarter, 'relu')
    self.scale3 = conv_block_3x3(quarter, rest, None)
    # Project the skip connection only when channel counts differ.
    if n_inp == n_out:
        self.skip = Identity()
    else:
        self.skip = conv_block_1x1(n_inp, n_out, None)
    if se:
        # NOTE(review): se_block exists only when se=True — the forward
        # pass presumably checks for it; confirm against the class body.
        self.se_block = SEBlock(n_out, r=se_ratio)
def __init__(self, bw=24, depth=6, center_depth=2, n_inputs=1,
             n_classes=1, activation='relu', norm_encoder='BN',
             norm_decoder='BN', norm_center='BN'):
    """Assemble an encoder-center-decoder (U-Net style) module stack.

    :param bw: base width (channels) of the first encoder level.
    :param depth: number of encoder/decoder levels; must be >= 2 so the
        deepest level's channel multiplier can be derived.
    :param center_depth: number of 3x3 conv blocks in the bottleneck.
    :param n_inputs: number of input channels.
    :param n_classes: number of output channels of the final 1x1 mixer.
    :param activation: activation passed to the conv blocks.
    :param norm_encoder: normalization used in the encoder levels.
    :param norm_decoder: normalization used in the decoder levels.
    :param norm_center: normalization used in the bottleneck.
    :raises ValueError: if ``depth`` < 2, since the encoder output width
        cannot be determined.
    """
    super().__init__()
    # Preparing the modules dict
    modules = OrderedDict()
    modules['down1'] = Encoder(n_inputs, bw, activation=activation,
                               normalization=norm_encoder)
    # Automatically creating the Encoder based on the depth and width:
    # channel width doubles at every level.
    mul_out = None
    for level in range(2, depth + 1):
        mul_in = 2 ** (level - 2)
        mul_out = 2 ** (level - 1)
        layer = Encoder(bw * mul_in, bw * mul_out, activation=activation,
                        normalization=norm_encoder)
        modules['down' + str(level)] = layer

    if mul_out is None:
        raise ValueError(
            'The depth parameter is wrong. Cannot determine the output size of the encoder'
        )

    # Creating the center: center_depth conv blocks at constant width.
    modules['center'] = nn.Sequential(*[
        conv_block_3x3(bw * mul_out, bw * mul_out, activation, norm_center)
        for _ in range(center_depth)
    ])

    # Automatically creating the decoder; input width is doubled because
    # the matching encoder output is concatenated in (skip connection).
    for level in reversed(range(2, depth + 1)):
        mul_in = 2 ** (level - 1)
        layer = Decoder(2 * bw * mul_in, bw * mul_in // 2,
                        activation=activation, normalization=norm_decoder)
        modules['up' + str(level)] = layer

    modules['up1'] = Decoder(bw + bw, bw, activation=activation,
                             normalization=norm_decoder)
    modules['mixer'] = nn.Conv2d(bw, n_classes, kernel_size=1,
                                 padding=0, stride=1, bias=True)
    # Register submodules through the public API instead of overwriting
    # self._modules via self.__dict__, which bypasses nn.Module's
    # registration and name-validation machinery.
    for name, module in modules.items():
        self.add_module(name, module)
def __init__(self, inp_channels, out_channels, depth=2, activation='relu',
             normalization='BN'):
    """Stack ``depth`` 3x3 conv blocks; the first maps inp -> out channels.

    :param inp_channels: number of input channels of the first block.
    :param out_channels: number of channels of every subsequent block.
    :param depth: how many 3x3 conv blocks to stack.
    :param activation: activation passed to each conv block.
    :param normalization: normalization passed to each conv block.
    """
    super().__init__()
    self.layers = nn.Sequential()
    for idx in range(depth):
        # Only the first block changes the channel count.
        in_ch = inp_channels if idx == 0 else out_channels
        block = conv_block_3x3(in_ch, out_channels, activation,
                               normalization)
        # Each block is wrapped in its own Sequential so parameter
        # names (state_dict keys) keep the original nested layout.
        self.layers.add_module('conv_3x3_{}'.format(idx),
                               nn.Sequential(block))
def __init__(self, n_inp, n_out, se=False, se_ratio=16):
    """Bottleneck residual block: 1x1 reduce -> 3x3 conv -> 1x1 expand.

    :param n_inp: number of input channels.
    :param n_out: number of output channels.
    :param se: whether to attach a squeeze-and-excitation block.
    :param se_ratio: reduction ratio for the SE block.
    """
    super().__init__()
    # Work at half the output width inside the bottleneck.
    mid = n_out // 2
    self.bottleneck = conv_block_1x1(n_inp, mid, 'relu')
    self.conv = conv_block_3x3(mid, mid, 'relu')
    self.out = conv_block_1x1(mid, n_out, None)
    # Identity skip when widths already match; 1x1 projection otherwise.
    self.skip = Identity() if n_inp == n_out else conv_block_1x1(n_inp, n_out, None)
    if se:
        # NOTE(review): se_block exists only when se=True — the forward
        # pass presumably checks for it; confirm against the class body.
        self.se_block = SEBlock(n_out, r=se_ratio)