import torch
import torch.nn as nn

# BasicBlock and Bottleneck referenced below are assumed to be the standard ResNet
# residual blocks (accepting an extra bn_momentum kwarg), defined earlier in the file.


class StageModule(nn.Module):
    def __init__(self, stage, output_branches, c, bn_momentum):
        super(StageModule, self).__init__()
        self.stage = stage
        self.output_branches = output_branches

        # One branch of four BasicBlocks per resolution; branch i has c * 2**i channels
        self.branches = nn.ModuleList()
        for i in range(self.stage):
            w = c * (2 ** i)
            branch = nn.Sequential(
                BasicBlock(w, w, bn_momentum=bn_momentum),
                BasicBlock(w, w, bn_momentum=bn_momentum),
                BasicBlock(w, w, bn_momentum=bn_momentum),
                BasicBlock(w, w, bn_momentum=bn_momentum),
            )
            self.branches.append(branch)

        self.fuse_layers = nn.ModuleList()
        # For each output branch (i.e. every branch, except in the very last module,
        # which keeps only the first one)
        for i in range(self.output_branches):
            self.fuse_layers.append(nn.ModuleList())
            for j in range(self.stage):  # for each input branch
                if i == j:
                    # Used in place of "None" because it is callable
                    self.fuse_layers[-1].append(nn.Sequential())
                elif i < j:
                    # Lower-resolution input: 1x1 conv to match channels, then upsample
                    # (note: fuse layers use the default BN momentum 0.1, not bn_momentum)
                    self.fuse_layers[-1].append(nn.Sequential(
                        nn.Conv2d(c * (2 ** j), c * (2 ** i), kernel_size=(1, 1), stride=(1, 1), bias=False),
                        nn.BatchNorm2d(c * (2 ** i), eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
                        nn.Upsample(scale_factor=(2.0 ** (j - i)), mode='nearest'),
                    ))
                elif i > j:
                    # Higher-resolution input: chain of stride-2 3x3 convs to downsample
                    ops = []
                    for k in range(i - j - 1):
                        ops.append(nn.Sequential(
                            nn.Conv2d(c * (2 ** j), c * (2 ** j), kernel_size=(3, 3), stride=(2, 2),
                                      padding=(1, 1), bias=False),
                            nn.BatchNorm2d(c * (2 ** j), eps=1e-05, momentum=0.1, affine=True,
                                           track_running_stats=True),
                            nn.ReLU(inplace=True),
                        ))
                    ops.append(nn.Sequential(
                        nn.Conv2d(c * (2 ** j), c * (2 ** i), kernel_size=(3, 3), stride=(2, 2),
                                  padding=(1, 1), bias=False),
                        nn.BatchNorm2d(c * (2 ** i), eps=1e-05, momentum=0.1, affine=True,
                                       track_running_stats=True),
                    ))
                    self.fuse_layers[-1].append(nn.Sequential(*ops))

        self.relu = nn.ReLU(inplace=True)
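    # The section above only shows __init__; below is a minimal sketch of the
    # matching forward pass, added for clarity. It assumes x is a list with one
    # feature map per branch and mirrors the fuse-layer indexing above; treat it
    # as an illustration rather than the author's exact implementation.
    def forward(self, x):
        assert len(self.branches) == len(x)
        # Run each resolution through its own branch of BasicBlocks
        x = [branch(xi) for branch, xi in zip(self.branches, x)]

        # Fuse: output branch i sums the (resampled) features of every input branch j
        x_fused = []
        for i in range(len(self.fuse_layers)):
            for j in range(len(self.branches)):
                if j == 0:
                    x_fused.append(self.fuse_layers[i][0](x[0]))
                else:
                    x_fused[i] = x_fused[i] + self.fuse_layers[i][j](x[j])

        return [self.relu(xf) for xf in x_fused]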
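# Hypothetical shape check for StageModule, using the forward sketch above. The
# batch size and the 64x48 spatial size are illustrative only.
if __name__ == '__main__':
    c = 48
    module = StageModule(stage=2, output_branches=2, c=c, bn_momentum=0.1)
    x = [
        torch.randn(1, c, 64, 48),      # branch 0: full resolution, c channels
        torch.randn(1, c * 2, 32, 24),  # branch 1: half resolution, 2c channels
    ]
    print([tuple(o.shape) for o in module(x)])
    # -> [(1, 48, 64, 48), (1, 96, 32, 24)]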
class HigherHRNet(nn.Module):
    def __init__(self, c=48, nof_joints=17, bn_momentum=0.1):
        super(HigherHRNet, self).__init__()

        # Input (stem net): two stride-2 3x3 convs bring the input to 1/4 resolution
        self.conv1 = nn.Conv2d(3, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
        self.bn1 = nn.BatchNorm2d(64, eps=1e-05, momentum=bn_momentum, affine=True, track_running_stats=True)
        self.conv2 = nn.Conv2d(64, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
        self.bn2 = nn.BatchNorm2d(64, eps=1e-05, momentum=bn_momentum, affine=True, track_running_stats=True)
        self.relu = nn.ReLU(inplace=True)

        # Stage 1 (layer1) - First group of Bottleneck (ResNet) modules
        downsample = nn.Sequential(
            nn.Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False),
            nn.BatchNorm2d(256, eps=1e-05, momentum=bn_momentum, affine=True, track_running_stats=True),
        )
        self.layer1 = nn.Sequential(
            Bottleneck(64, 64, downsample=downsample),
            Bottleneck(256, 64),
            Bottleneck(256, 64),
            Bottleneck(256, 64),
        )

        # Fusion layer 1 (transition1) - Creation of the first two branches (one full and one half resolution)
        self.transition1 = nn.ModuleList([
            nn.Sequential(
                nn.Conv2d(256, c, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False),
                nn.BatchNorm2d(c, eps=1e-05, momentum=bn_momentum, affine=True, track_running_stats=True),
                nn.ReLU(inplace=True),
            ),
            nn.Sequential(nn.Sequential(  # Double Sequential to fit with the official pretrained weights
                nn.Conv2d(256, c * (2 ** 1), kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False),
                nn.BatchNorm2d(c * (2 ** 1), eps=1e-05, momentum=bn_momentum, affine=True, track_running_stats=True),
                nn.ReLU(inplace=True),
            )),
        ])

        # Stage 2 (stage2) - Second module with 1 group of BasicBlock (ResNet) modules. This has 2 branches
        self.stage2 = nn.Sequential(
            StageModule(stage=2, output_branches=2, c=c, bn_momentum=bn_momentum),
        )

        # Fusion layer 2 (transition2) - Creation of the third branch (1/4 resolution)
        self.transition2 = nn.ModuleList([
            nn.Sequential(),  # Used in place of "None" because it is callable
            nn.Sequential(),  # Used in place of "None" because it is callable
            nn.Sequential(nn.Sequential(  # Double Sequential to fit with the official pretrained weights
                nn.Conv2d(c * (2 ** 1), c * (2 ** 2), kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False),
                nn.BatchNorm2d(c * (2 ** 2), eps=1e-05, momentum=bn_momentum, affine=True, track_running_stats=True),
                nn.ReLU(inplace=True),
            )),  # ToDo: why does the new branch derive from the "upper" branch only?
        ])

        # Stage 3 (stage3) - Third module with 4 groups of BasicBlock (ResNet) modules. This has 3 branches
        self.stage3 = nn.Sequential(
            StageModule(stage=3, output_branches=3, c=c, bn_momentum=bn_momentum),
            StageModule(stage=3, output_branches=3, c=c, bn_momentum=bn_momentum),
            StageModule(stage=3, output_branches=3, c=c, bn_momentum=bn_momentum),
            StageModule(stage=3, output_branches=3, c=c, bn_momentum=bn_momentum),
        )

        # Fusion layer 3 (transition3) - Creation of the fourth branch (1/8 resolution)
        self.transition3 = nn.ModuleList([
            nn.Sequential(),  # Used in place of "None" because it is callable
            nn.Sequential(),  # Used in place of "None" because it is callable
            nn.Sequential(),  # Used in place of "None" because it is callable
            nn.Sequential(nn.Sequential(  # Double Sequential to fit with the official pretrained weights
                nn.Conv2d(c * (2 ** 2), c * (2 ** 3), kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False),
                nn.BatchNorm2d(c * (2 ** 3), eps=1e-05, momentum=bn_momentum, affine=True, track_running_stats=True),
                nn.ReLU(inplace=True),
            )),  # ToDo: why does the new branch derive from the "upper" branch only?
        ])

        # Stage 4 (stage4) - Fourth module with 3 groups of BasicBlock (ResNet) modules. This has 4 branches;
        # the last module keeps only the highest-resolution branch (output_branches=1)
        self.stage4 = nn.Sequential(
            StageModule(stage=4, output_branches=4, c=c, bn_momentum=bn_momentum),
            StageModule(stage=4, output_branches=4, c=c, bn_momentum=bn_momentum),
            StageModule(stage=4, output_branches=1, c=c, bn_momentum=bn_momentum),
        )

        # New HigherHRNet section

        # Final blocks
        self.num_deconvs = 1
        self.final_layers = []
        # "We only predict tagmaps at the lowest resolution, instead of using all resolutions"
        # At the lowest resolution, both heatmaps and tagmaps are predicted for every joint
        # -> output channels are nof_joints * 2
        self.final_layers.append(nn.Conv2d(c, nof_joints * 2, kernel_size=(1, 1), stride=(1, 1)))
        for i in range(self.num_deconvs):
            # Higher resolutions predict heatmaps only -> nof_joints output channels
            self.final_layers.append(nn.Conv2d(c, nof_joints, kernel_size=(1, 1), stride=(1, 1)))
        self.final_layers = nn.ModuleList(self.final_layers)

        # Deconv layers
        self.deconv_layers = []
        input_channels = c
        for i in range(self.num_deconvs):
            # The deconv input is the backbone features concatenated with the previous
            # prediction; see the comment above about "nof_joints * 2" at the lowest resolution
            if i == 0:
                input_channels += nof_joints * 2
            else:
                input_channels += nof_joints
            output_channels = c
            deconv_kernel, padding, output_padding = 4, 1, 0

            layers = []
            layers.append(nn.Sequential(
                nn.ConvTranspose2d(input_channels, output_channels, kernel_size=deconv_kernel, stride=2,
                                   padding=padding, output_padding=output_padding, bias=False),
                nn.BatchNorm2d(output_channels, momentum=bn_momentum),
                nn.ReLU(inplace=True),
            ))
            for _ in range(4):
                layers.append(nn.Sequential(BasicBlock(output_channels, output_channels)))
            self.deconv_layers.append(nn.Sequential(*layers))
            input_channels = output_channels
        self.deconv_layers = nn.ModuleList(self.deconv_layers)
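    # Only __init__ appears in this section; the forward pass below is a minimal
    # sketch added for clarity. It wires stem -> layer1 -> transitions/stages ->
    # deconv and final heads, following the comments above; treat it as an
    # illustration rather than the author's exact implementation.
    def forward(self, x):
        # Stem: 1/4 resolution, 64 channels
        x = self.relu(self.bn1(self.conv1(x)))
        x = self.relu(self.bn2(self.conv2(x)))

        x = self.layer1(x)
        x = [trans(x) for trans in self.transition1]  # split into the first 2 branches

        x = self.stage2(x)
        x = [
            self.transition2[0](x[0]),
            self.transition2[1](x[1]),
            self.transition2[2](x[-1]),  # third branch derived from the last branch
        ]

        x = self.stage3(x)
        x = [
            self.transition3[0](x[0]),
            self.transition3[1](x[1]),
            self.transition3[2](x[2]),
            self.transition3[3](x[-1]),  # fourth branch derived from the last branch
        ]

        x = self.stage4(x)  # the last StageModule returns only the highest-resolution branch

        # HigherHRNet heads: heatmaps + tagmaps at 1/4 resolution, then concatenate
        # features with predictions, deconvolve to 1/2 resolution, and predict heatmaps again
        final_outputs = []
        x = x[0]
        y = self.final_layers[0](x)
        final_outputs.append(y)
        for i in range(self.num_deconvs):
            x = torch.cat((x, y), 1)
            x = self.deconv_layers[i](x)
            y = self.final_layers[i + 1](x)
            final_outputs.append(y)
        return final_outputs


# Hypothetical smoke test: with the defaults c=48 and nof_joints=17, a 512x512
# input should yield heatmaps+tagmaps at 1/4 resolution and heatmaps at 1/2.
if __name__ == '__main__':
    model = HigherHRNet(c=48, nof_joints=17)
    outputs = model(torch.randn(1, 3, 512, 512))
    print([tuple(o.shape) for o in outputs])
    # -> [(1, 34, 128, 128), (1, 17, 256, 256)]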