def __init__(self, input_channel, hourglass_channels, boundaries, norm_type='BN', act_type='prelu', num_group=None):
    super().__init__()
    # (in, out) = (1, 256)
    self.pre = PreRes(input_channel, hourglass_channels, norm_type=norm_type, act_type=act_type, num_group=num_group)
    # Four stacked hourglass stages, all at the same channel width.
    self.hourglass1 = HourglassBlock(hourglass_channels, norm_type=norm_type, act_type=act_type, num_group=num_group)
    self.hourglass2 = HourglassBlock(hourglass_channels, norm_type=norm_type, act_type=act_type, num_group=num_group)
    self.hourglass3 = HourglassBlock(hourglass_channels, norm_type=norm_type, act_type=act_type, num_group=num_group)
    self.hourglass4 = HourglassBlock(hourglass_channels, norm_type=norm_type, act_type=act_type, num_group=num_group)
    # One 7x7 heatmap head per stage, mapping hourglass features to boundary heatmaps.
    self.heatmap1 = nn.Sequential(*optUnit(opt_type='conv', in_ch=hourglass_channels, out_ch=boundaries, ker_size=7, stride=1))
    self.heatmap2 = nn.Sequential(*optUnit(opt_type='conv', in_ch=hourglass_channels, out_ch=boundaries, ker_size=7, stride=1))
    self.heatmap3 = nn.Sequential(*optUnit(opt_type='conv', in_ch=hourglass_channels, out_ch=boundaries, ker_size=7, stride=1))
    self.heatmap4 = nn.Sequential(*optUnit(opt_type='conv', in_ch=hourglass_channels, out_ch=boundaries, ker_size=7, stride=1))
    # Kaiming init for conv weights, constant init for normalization layers.
    for _layer in self.modules():
        if isinstance(_layer, opt_layer):
            nn.init.kaiming_normal_(_layer.weight, 2 ** 0.5)
        if isinstance(_layer, norm_layer):
            nn.init.constant_(_layer.weight, 1.0)
            nn.init.constant_(_layer.bias, 0.0)
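# A minimal, self-contained sketch of how a four-stage stacked hourglass estimator is
# typically driven with intermediate supervision: each stage refines the shared features
# and its own head emits a boundary heatmap, and all four heatmaps are returned so each
# stage can receive a loss. The stand-in modules below (`_Stage`, `_head`) and the exact
# stage-to-stage wiring are assumptions for illustration; the real forward pass of this
# class is not shown here.
import torch
from torch import nn


class _Stage(nn.Module):
    """Stand-in for HourglassBlock: keeps channels and resolution unchanged."""
    def __init__(self, ch):
        super().__init__()
        self.body = nn.Sequential(nn.Conv2d(ch, ch, 3, 1, 1), nn.ReLU(inplace=True))

    def forward(self, x):
        return x + self.body(x)


def _head(ch, boundaries):
    """Stand-in for the 7x7 heatmap head."""
    return nn.Conv2d(ch, boundaries, 7, 1, 3)


class _StackedSketch(nn.Module):
    def __init__(self, ch=64, boundaries=13, stages=4):
        super().__init__()
        self.stages = nn.ModuleList(_Stage(ch) for _ in range(stages))
        self.heads = nn.ModuleList(_head(ch, boundaries) for _ in range(stages))

    def forward(self, feats):
        heatmaps = []
        for stage, head in zip(self.stages, self.heads):
            feats = stage(feats)          # refine the shared feature map
            heatmaps.append(head(feats))  # per-stage heatmap for intermediate supervision
        return heatmaps


feats = torch.randn(2, 64, 64, 64)        # B, C, 64, 64 features from the stem
maps = _StackedSketch()(feats)
print([m.shape for m in maps])             # four tensors of shape (2, 13, 64, 64)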
def __init__(self, input_channel, output_channel, norm_type='BN', act_type='prelu', num_group=None):
    super().__init__()
    # (in, out) = (1, 256)
    # 7x7 stride-2 conv: halves the resolution and lifts channels to output_channel // 4.
    layers = optUnit(opt_type='conv', norm_type=norm_type, act_type=act_type, in_ch=input_channel,
                     out_ch=output_channel // 4, ker_size=7, stride=2, num_group=num_group)
    # Raise channels 64 -> 128.
    layers.append(
        ResidualBlock(output_channel // 4, output_channel // 2, stride=1, res_type=2,
                      norm_type=norm_type, act_type=act_type, num_group=num_group))
    # Reduce resolution.
    layers.append(nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
    layers.append(
        ResidualBlock(output_channel // 2, output_channel // 2, stride=1, res_type=3,
                      norm_type=norm_type, act_type=act_type, num_group=num_group))
    layers += optUnit(norm_type=norm_type, act_type=act_type, out_ch=output_channel // 2)
    # Raise channels 128 -> 256.
    layers.append(
        ResidualBlock(output_channel // 2, output_channel, stride=1, res_type=2,
                      norm_type=norm_type, act_type=act_type, num_group=num_group))
    self.body = nn.Sequential(*layers)
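# A quick, self-contained shape trace of the stem's downsampling schedule using plain
# torch layers in place of optUnit/ResidualBlock (which are project-specific and not
# defined in this snippet). Padding values are assumptions chosen so the spatial sizes
# halve cleanly; the shape-preserving 128 -> 128 block and the norm/activation unit are
# omitted since they do not change shapes. Only the resolution path (256 -> 128 -> 64)
# and the channel path (1 -> 64 -> 128 -> 256, for output_channel=256) are meant to
# mirror the code above.
import torch
from torch import nn

stem = nn.Sequential(
    nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3),     # 256 -> 128, 1 -> 64 channels
    nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1),   # stands in for the 64 -> 128 residual block
    nn.MaxPool2d(kernel_size=3, stride=2, padding=1),         # 128 -> 64
    nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1),  # stands in for the 128 -> 256 residual block
)
x = torch.randn(1, 1, 256, 256)
print(stem(x).shape)  # torch.Size([1, 256, 64, 64])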
def __init__(self, boundaries, norm_type='IN', act_type='leakyRelu'):
    super().__init__()
    # B, 13, 64, 64 -> B, 14, 64, 64 -> B, 16, 4, 4
    channel = [boundaries + 1, 64, 128, 256, 16]
    layers = optUnit(opt_type='conv', norm_type=norm_type, act_type=act_type,
                     in_ch=channel[0], out_ch=channel[1], ker_size=3, stride=2)  # 32
    layers += optUnit(opt_type='conv', norm_type=norm_type, act_type=act_type,
                      in_ch=channel[1], out_ch=channel[2], ker_size=3, stride=2)  # 16
    layers += optUnit(opt_type='conv', norm_type=norm_type, act_type=act_type,
                      in_ch=channel[2], out_ch=channel[3], ker_size=3, stride=2)  # 8
    layers += optUnit(opt_type='conv', in_ch=channel[3], out_ch=channel[4], ker_size=3, stride=2)  # 16, 4, 4
    self.layers = nn.Sequential(*layers)
    for _layer in self.modules():
        if isinstance(_layer, opt_layer):
            nn.init.kaiming_normal_(_layer.weight, 2 ** 0.5)
        if isinstance(_layer, norm_layer):
            nn.init.constant_(_layer.weight, 1.0)
            nn.init.constant_(_layer.bias, 0.0)
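# Self-contained shape trace of the four stride-2 convolutions above: with boundaries=13
# the input is (B, 14, 64, 64) and the output is (B, 16, 4, 4). padding=1 is an assumption
# so that each 3x3 stride-2 conv halves the resolution exactly; the normalization and
# activation layers are left out because they do not affect shapes.
import torch
from torch import nn

channel = [13 + 1, 64, 128, 256, 16]
trace = nn.Sequential(*[
    nn.Conv2d(channel[i], channel[i + 1], kernel_size=3, stride=2, padding=1)
    for i in range(4)
])
x = torch.randn(2, channel[0], 64, 64)
print(trace(x).shape)  # torch.Size([2, 16, 4, 4])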
def __init__(self, channels, input_channels=14, norm_type='BN', act_type='prelu', num_group=None):
    # input: 14, channels: 256
    super().__init__()
    # B, 14, 256, 256 -> B, 32, 64, 64
    self.res0 = nn.Sequential(*(
        optUnit(opt_type='conv', norm_type=norm_type, act_type=act_type, in_ch=input_channels,
                out_ch=channels // 16, ker_size=7, stride=2, num_group=num_group)
        + [nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
           ResidualBlock(channels // 16, channels // 8, 1, 2, norm_type=norm_type, act_type=act_type, num_group=num_group),
           ResidualBlock(channels // 8, channels // 8, 1, 2, norm_type=norm_type, act_type=act_type, num_group=num_group)]))
    # B, 32, 64, 64 -> B, 64, 64, 64 -> B, 64, 32, 32
    self.fmf1 = FeatureMapFusion(channels // 8, None)
    self.res1 = nn.Sequential(
        ResidualBlock(channels // 4, channels // 4, 2, 2, 4, norm_type=norm_type, act_type=act_type, num_group=num_group),
        ResidualBlock(channels // 4, channels // 4, 1, 3, 4, norm_type=norm_type, act_type=act_type, num_group=num_group))
    # B, 64, 32, 32 -> B, 128, 32, 32 -> B, 128, 16, 16
    self.fmf2 = FeatureMapFusion(channels // 4, 2)
    self.res2 = nn.Sequential(
        ResidualBlock(channels // 2, channels // 2, 2, 2, 4, norm_type=norm_type, act_type=act_type, num_group=num_group),
        ResidualBlock(channels // 2, channels // 2, 1, 3, 4, norm_type=norm_type, act_type=act_type, num_group=num_group))
    # B, 128, 16, 16 -> B, 256, 16, 16 -> B, 256, 8, 8
    self.fmf3 = FeatureMapFusion(channels // 2, 4)
    self.res3 = nn.Sequential(
        ResidualBlock(channels, channels, 2, 2, 4, norm_type=norm_type, act_type=act_type, num_group=num_group),
        ResidualBlock(channels, channels, 1, 3, 4, norm_type=norm_type, act_type=act_type, num_group=num_group))
    # B, 256, 8, 8 -> B, 256 -> B, 256 -> B, 196
    self.output0 = nn.Conv2d(channels, channels, 8, 1, 0, bias=False)
    self.output1 = nn.Sequential(
        nn.BatchNorm1d(channels), nn.PReLU(),
        nn.Linear(channels, channels, False), nn.BatchNorm1d(channels),
        nn.Dropout2d(p=0.4), nn.BatchNorm1d(channels),
        nn.Linear(channels, 196, False))
    # Xavier init for conv weights, constant init for normalization layers.
    for _layer in self.modules():
        if isinstance(_layer, opt_layer):
            nn.init.xavier_normal_(_layer.weight, 2 ** 0.5)
        if isinstance(_layer, norm_layer):
            nn.init.constant_(_layer.weight, 1.0)
            nn.init.constant_(_layer.bias, 0.0)
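# Self-contained sketch of the regression head: an 8x8 convolution with no padding
# collapses the (B, 256, 8, 8) feature map to (B, 256, 1, 1), which is then flattened
# and passed through the fully connected head to 196 values (presumably 98 (x, y)
# landmark pairs). The flatten step is an assumption about the forward pass, which is
# not shown here, and plain Dropout is used in place of Dropout2d so the snippet runs
# on a 2-D tensor.
import torch
from torch import nn

channels = 256
output0 = nn.Conv2d(channels, channels, 8, 1, 0, bias=False)  # B, 256, 8, 8 -> B, 256, 1, 1
output1 = nn.Sequential(
    nn.BatchNorm1d(channels), nn.PReLU(),
    nn.Linear(channels, channels, False), nn.BatchNorm1d(channels),
    nn.Dropout(p=0.4),
    nn.BatchNorm1d(channels), nn.Linear(channels, 196, False))

x = torch.randn(4, channels, 8, 8)
y = output1(output0(x).flatten(1))  # flatten to (B, 256) before the 1-D head
print(y.shape)                      # torch.Size([4, 196])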