def __init__(self, in_channels: int, out_channels: int, kernel_size: _SIZE_, stride: _SIZE_ = 1, padding: _SIZE_ = 0, dilation: _SIZE_ = 1, groups: int = 1, bias: bool = True, alpha_out: float = 0.5, activations: Optional[nn.Module] = nn.ReLU(True)):
    """Input block of an octave convolution: maps a single full-resolution
    feature map onto a high-frequency and a low-frequency branch.

    ``alpha_out`` is the fraction of ``out_channels`` assigned to the
    low-frequency branch; the remainder goes to the high-frequency branch.

    NOTE(review): the ``activations`` default is evaluated once at ``def``
    time, so all instances share one ``nn.ReLU`` module — harmless for a
    stateless ReLU, but worth confirming if the default ever changes.
    """
    super(OctConvInBlock, self).__init__()
    # Channel split between the two frequency branches.
    high_ch = int((1 - alpha_out) * out_channels)
    low_ch = int(alpha_out * out_channels)
    # Both branch convs are built without an activation; the shared
    # activation module is applied separately (stored below).
    self.h2h_conv: nn.Module = build_basic_block(in_channels, high_ch, kernel_size, stride, padding, dilation, groups, bias, None)
    self.h2l_conv: nn.Module = build_basic_block(in_channels, low_ch, kernel_size, stride, padding, dilation, groups, bias, None)
    self.activations: Optional[nn.Module] = activations
def __init__(self, in_channels: int, out_channels: int):
    """DarkNet two-conv block: a 1x1 conv that halves the channels,
    followed by a 3x3 conv expanding to ``out_channels``.

    (The residual connection suggested by the class name would live in
    ``forward``, which is not visible here.)
    """
    super(DarkNetResBlock, self).__init__()
    squeezed = in_channels // 2  # bottleneck width
    self.block_1 = build_basic_block(in_channels, squeezed, kernel_size=1, stride=1, padding=0, activations=nn.LeakyReLU())
    self.block_2 = build_basic_block(squeezed, out_channels, kernel_size=3, stride=1, padding=1, activations=nn.LeakyReLU())
def __init__(self, in_channels, num_cla):
    """YOLOv3 detection head: one 3x3 feature conv followed by a 1x1
    prediction conv.

    The prediction conv emits ``3 * (5 + num_cla)`` channels — presumably
    3 anchors x (4 box coords + 1 objectness + ``num_cla`` class scores);
    confirm against the decoding code.
    """
    super(YOLOV3Head, self).__init__()
    expanded = 2 * in_channels
    self.block = build_basic_block(in_channels, expanded, 3, 1, 1, activations=nn.LeakyReLU(inplace=True))
    self.head = nn.Conv2d(expanded, 3 * (num_cla + 5), 1, 1, 0)
def __init__(self, in_channels: int, out_channels: int, end=False):
    """YOLOv3 feature-refinement layer: five convs alternating 1x1 and 3x3,
    optionally followed by a 1x1 smoothing conv.

    When ``end`` is true the smoothing conv is omitted and
    ``self.block_smooth`` stays ``None``.
    """
    super(YOLOV3Layer, self).__init__()
    narrow = out_channels * 2
    wide = out_channels * 4
    # Channel sequence in -> narrow -> wide -> narrow -> wide -> narrow,
    # with even steps using 1x1/pad-0 convs and odd steps 3x3/pad-1.
    widths = [in_channels, narrow, wide, narrow, wide, narrow]
    stages = []
    for step, (c_in, c_out) in enumerate(zip(widths[:-1], widths[1:])):
        k, p = (1, 0) if step % 2 == 0 else (3, 1)
        stages.append(build_basic_block(c_in, c_out, k, 1, p, activations=nn.LeakyReLU(inplace=True)))
    self.block_extract = nn.Sequential(*stages)
    self.block_smooth: Optional[nn.Module] = None
    if not end:
        self.block_smooth = build_basic_block(narrow, out_channels, 1, 1, 0, activations=nn.LeakyReLU(inplace=True))
    self.init_params()
def __init__(self, in_channels: int, config: _CONFIG_, activations: Optional[nn.Module] = nn.LeakyReLU()):
    """DarkNet-53 backbone: a stem conv to 32 channels followed by five
    residual stages built from ``config[0]`` .. ``config[4]``.

    ``self.channel`` tracks the running channel count and is updated by
    ``build_res_block_layer`` as stages are built.

    NOTE(review): the ``activations`` default is a single ``nn.LeakyReLU``
    module created at ``def`` time and shared across instances — benign
    for a stateless activation, but confirm if the default ever changes.
    """
    super(DarkNet53, self).__init__()
    self.channel = in_channels
    self.activations = activations
    # Stem: 3x3 conv to 32 channels.
    self.block_1 = build_basic_block(self.channel, 32, 3, 1, 1, activations=self.activations)
    self.channel = 32
    # Stages block_2 .. block_6 come from config[0] .. config[4].
    for stage in range(5):
        setattr(self, 'block_{}'.format(stage + 2), self.build_res_block_layer(config[stage]))
def make_baisc_conv_block_layer(self, config: _CONFIG_) -> nn.Sequential:
    """Build a sequential stack of basic conv blocks from a config dict.

    Each position i of the config lists describes one conv block; the
    running ``self.channel`` supplies its input width and is advanced to
    its output width. Lists of unequal length are truncated to the
    shortest (``zip`` semantics).

    NOTE(review): "baisc" in the method name is a typo for "basic", and
    the ``'stride'`` key is singular while its siblings are plural —
    both are kept as-is because external callers/configs depend on them.
    """
    specs = zip(config['channels'], config['kernel_sizes'], config['stride'],
                config['paddings'], config['dilations'], config['groups'],
                config['biases'])
    blocks: List[nn.Module] = []
    for out_ch, k_size, s, pad, dil, grp, use_bias in specs:
        blocks.append(build_basic_block(self.channel, out_ch, k_size, s, pad,
                                        dil, grp, use_bias, self.activations))
        self.channel = out_ch
    return nn.Sequential(*blocks)
def build_res_block_layer(self, config: _CONFIG_):
    """Build one DarkNet stage: a downsampling/stem basic block configured
    by index 0 of each config list, followed by one ``DarkNetResBlock``
    per remaining entry of ``config['channels']``.

    ``self.channel`` tracks the running channel count across the stage.
    """
    ch_list: List[int] = config['channels']
    stem = build_basic_block(self.channel, ch_list[0],
                             config['kernel_sizes'][0], config['stride'][0],
                             config['paddings'][0], config['dilations'][0],
                             config['groups'][0], config['biases'][0],
                             self.activations)
    self.channel = ch_list[0]
    modules: List[nn.Module] = [stem]
    for out_ch in ch_list[1:]:
        modules.append(DarkNetResBlock(self.channel, out_ch))
        self.channel = out_ch
    return nn.Sequential(*modules)