def __init__(self, block, depth, num_classes, alpha, beta, dropout, num_frames,
             bL_ratio=2, without_t_stride=False, temporal_module=None, input_channels=3):
    super().__init__()
    # Big-Little ResNet stage depths; only these depths are supported.
    layers = {
        50: [3, 4, 6, 3],
        101: [4, 8, 18, 3],
        152: [5, 12, 30, 3]
    }[depth]
    num_channels = [64, 128, 256, 512]
    self.depth = depth
    self.alpha = alpha
    self.beta = beta
    self.num_frames = num_frames
    self.orig_num_frames = num_frames
    self.temporal_module = temporal_module
    self.without_t_stride = without_t_stride
    self.num_classes = num_classes
    self.bL_ratio = bL_ratio
    # make sure the number of frames is valid
    self.inplanes = 64

    self.conv1 = nn.Conv2d(input_channels, num_channels[0], kernel_size=7, stride=2,
                           padding=3, bias=False)
    self.bn1 = nn.BatchNorm2d(num_channels[0])
    self.relu = nn.ReLU(inplace=True)

    # Big branch: spatially downsampled (stride 2), full channel width.
    self.b_conv0 = nn.Conv2d(num_channels[0], num_channels[0], kernel_size=3,
                             stride=2, padding=1, bias=False)
    self.bn_b0 = nn.BatchNorm2d(num_channels[0])

    # Little branch: channels reduced by a factor of alpha, then projected back.
    self.l_conv0 = nn.Conv2d(num_channels[0], num_channels[0] // alpha,
                             kernel_size=3, stride=1, padding=1, bias=False)
    self.bn_l0 = nn.BatchNorm2d(num_channels[0] // alpha)
    self.l_conv1 = nn.Conv2d(num_channels[0] // alpha, num_channels[0] // alpha,
                             kernel_size=3, stride=2, padding=1, bias=False)
    self.bn_l1 = nn.BatchNorm2d(num_channels[0] // alpha)
    self.l_conv2 = nn.Conv2d(num_channels[0] // alpha, num_channels[0],
                             kernel_size=1, stride=1, bias=False)
    self.bn_l2 = nn.BatchNorm2d(num_channels[0])

    # 1x1 conv that merges the two branches before the residual stages.
    self.bl_init = nn.Conv2d(num_channels[0], num_channels[0], kernel_size=1,
                             stride=1, bias=False)
    self.bn_bl_init = nn.BatchNorm2d(num_channels[0])

    self.tam = temporal_module(duration=self.num_frames, channels=num_channels[0]) \
        if temporal_module is not None else None

    self.layer1 = bLModule(block, num_channels[0], num_channels[0] * block.expansion,
                           layers[0], alpha, beta, stride=2,
                           num_frames=self.num_frames, temporal_module=temporal_module)
    if not self.without_t_stride:
        self.pool1 = TemporalPooling(self.num_frames, 3, 2, 'max')
        self.num_frames = self.num_frames // 2
    self.layer2 = bLModule(block, num_channels[0] * block.expansion,
                           num_channels[1] * block.expansion, layers[1], alpha, beta,
                           stride=2, num_frames=self.num_frames,
                           temporal_module=temporal_module)
    if not self.without_t_stride:
        self.pool2 = TemporalPooling(self.num_frames, 3, 2, 'max')
        self.num_frames = self.num_frames // 2
    self.layer3 = bLModule(block, num_channels[1] * block.expansion,
                           num_channels[2] * block.expansion, layers[2], alpha, beta,
                           stride=1, num_frames=self.num_frames,
                           temporal_module=temporal_module)
    if not self.without_t_stride:
        self.pool3 = TemporalPooling(self.num_frames, 3, 2, 'max')
        self.num_frames = self.num_frames // 2
    self.layer4 = self._make_layer(block, num_channels[2] * block.expansion,
                                   num_channels[3] * block.expansion, layers[3],
                                   self.num_frames // bL_ratio, self.temporal_module,
                                   stride=2)

    self.gappool = nn.AdaptiveAvgPool2d(1)
    self.dropout = nn.Dropout(dropout)
    self.fc = nn.Linear(num_channels[3] * block.expansion, num_classes)

    for m in self.modules():
        if isinstance(m, nn.Conv2d):
            nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
        elif isinstance(m, nn.BatchNorm2d):
            nn.init.constant_(m.weight, 1)
            nn.init.constant_(m.bias, 0)

    # Zero-initialize the last BN in each block.
    # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
    for m in self.modules():
        if isinstance(m, Bottleneck):
            nn.init.constant_(m.bn3.weight, 0)
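
# Usage sketch (illustrative, not from the repo): with the default
# without_t_stride=False, each TemporalPooling halves self.num_frames, so a
# 16-frame clip builds layer1/2/3 for 16, 8 and 4 frames, and layer4 for
# self.num_frames // bL_ratio = 2 // 2 = 1 frame. The class name `bLVNet`
# and the alpha/beta values below are assumptions for illustration only.
#
#   model = bLVNet(Bottleneck, depth=50, num_classes=400, alpha=2, beta=4,
#                  dropout=0.5, num_frames=16)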
def __init__(self, depth, num_frames, num_classes=1000, dropout=0.5,
             zero_init_residual=False, without_t_stride=False,
             temporal_module=None, pooling_method='max'):
    super(ResNet, self).__init__()
    self.pooling_method = pooling_method.lower()
    block = BasicBlock if depth < 50 else Bottleneck
    layers = {
        18: [2, 2, 2, 2],
        34: [3, 4, 6, 3],
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
        152: [3, 8, 36, 3]
    }[depth]
    self.depth = depth
    self.temporal_module = temporal_module
    self.num_frames = num_frames
    self.orig_num_frames = num_frames
    self.num_classes = num_classes
    self.without_t_stride = without_t_stride
    self.inplanes = 64

    self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
    self.bn1 = nn.BatchNorm2d(64)
    self.relu = nn.ReLU(inplace=True)
    self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

    self.layer1 = self._make_layer(block, 64, layers[0])
    if not self.without_t_stride:
        self.pool1 = TemporalPooling(self.num_frames, 3, 2, self.pooling_method)
        self.num_frames = self.num_frames // 2
    self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
    if not self.without_t_stride:
        self.pool2 = TemporalPooling(self.num_frames, 3, 2, self.pooling_method)
        self.num_frames = self.num_frames // 2
    self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
    if not self.without_t_stride:
        self.pool3 = TemporalPooling(self.num_frames, 3, 2, self.pooling_method)
        self.num_frames = self.num_frames // 2
    self.layer4 = self._make_layer(block, 512, layers[3], stride=2)

    self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
    self.dropout = nn.Dropout(dropout)
    self.fc = nn.Linear(512 * block.expansion, num_classes)

    for m in self.modules():
        if isinstance(m, nn.Conv2d):
            nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
        elif isinstance(m, nn.BatchNorm2d):
            nn.init.constant_(m.weight, 1)
            nn.init.constant_(m.bias, 0)

    # Zero-initialize the last BN in each residual branch, so that the
    # residual branch starts with zeros and each residual block behaves like
    # an identity. This improves the model by 0.2~0.3% according to
    # https://arxiv.org/abs/1706.02677
    if zero_init_residual:
        for m in self.modules():
            if isinstance(m, Bottleneck):
                nn.init.constant_(m.bn3.weight, 0)
            elif isinstance(m, BasicBlock):
                nn.init.constant_(m.bn2.weight, 0)
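
# Usage sketch (illustrative, not from the repo): the constructor picks
# BasicBlock for depth < 50 and Bottleneck otherwise, and with the default
# without_t_stride=False the clip length is halved after layer1/2/3
# (16 -> 8 -> 4 -> 2 frames). The call below only exercises the constructor;
# the forward signature is not shown in this snippet.
#
#   model = ResNet(depth=50, num_frames=16, num_classes=400, dropout=0.5,
#                  zero_init_residual=True)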
def __init__(self, num_frames, dropout, num_classes=1000, temporal_module=None,
             without_t_stride=False, pooling_method='max'):
    super().__init__()
    self.pooling_method = pooling_method.lower()
    self.orig_num_frames = num_frames
    self.num_frames = num_frames
    self.without_t_stride = without_t_stride
    self.temporal_module = temporal_module

    # Stem: each conv block is optionally followed by a temporal module,
    # built with the current frame count and channel width.
    self.conv1 = BasicConv2d(3, 64, kernel_size=7, stride=2, padding=3)
    if temporal_module is not None:
        self.tam1 = temporal_module(duration=self.num_frames, channels=64)
    self.maxpool1 = nn.MaxPool2d(3, stride=2, ceil_mode=True)
    self.conv2 = BasicConv2d(64, 64, kernel_size=1)
    if temporal_module is not None:
        self.tam2 = temporal_module(duration=self.num_frames, channels=64)
    self.conv3 = BasicConv2d(64, 192, kernel_size=3, padding=1)
    if temporal_module is not None:
        self.tam3 = temporal_module(duration=self.num_frames, channels=192)
    self.maxpool2 = nn.MaxPool2d(3, stride=2, ceil_mode=True)

    if not self.without_t_stride:
        self.t_pool1 = TemporalPooling(self.num_frames, 3, 2, self.pooling_method)
        self.num_frames = self.num_frames // 2
    self.inception3a = Inception(192, 64, 96, 128, 16, 32, 32)
    if temporal_module is not None:
        self.tam3a = temporal_module(duration=self.num_frames, channels=256)
    self.inception3b = Inception(256, 128, 128, 192, 32, 96, 64)
    if temporal_module is not None:
        self.tam3b = temporal_module(duration=self.num_frames, channels=480)
    self.maxpool3 = nn.MaxPool2d(3, stride=2, ceil_mode=True)

    if not self.without_t_stride:
        self.t_pool2 = TemporalPooling(self.num_frames, 3, 2, self.pooling_method)
        self.num_frames = self.num_frames // 2
    self.inception4a = Inception(480, 192, 96, 208, 16, 48, 64)
    if temporal_module is not None:
        self.tam4a = temporal_module(duration=self.num_frames, channels=512)
    self.inception4b = Inception(512, 160, 112, 224, 24, 64, 64)
    if temporal_module is not None:
        self.tam4b = temporal_module(duration=self.num_frames, channels=512)
    self.inception4c = Inception(512, 128, 128, 256, 24, 64, 64)
    if temporal_module is not None:
        self.tam4c = temporal_module(duration=self.num_frames, channels=512)
    self.inception4d = Inception(512, 112, 144, 288, 32, 64, 64)
    if temporal_module is not None:
        self.tam4d = temporal_module(duration=self.num_frames, channels=528)
    self.inception4e = Inception(528, 256, 160, 320, 32, 128, 128)
    if temporal_module is not None:
        self.tam4e = temporal_module(duration=self.num_frames, channels=832)
    self.maxpool4 = nn.MaxPool2d(2, stride=2, ceil_mode=True)

    if not self.without_t_stride:
        self.t_pool3 = TemporalPooling(self.num_frames, 3, 2, self.pooling_method)
        self.num_frames = self.num_frames // 2
    self.inception5a = Inception(832, 256, 160, 320, 32, 128, 128)
    if temporal_module is not None:
        self.tam5a = temporal_module(duration=self.num_frames, channels=832)
    self.inception5b = Inception(832, 384, 192, 384, 48, 128, 128)
    if temporal_module is not None:
        self.tam5b = temporal_module(duration=self.num_frames, channels=1024)

    self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
    self.dropout = nn.Dropout(dropout)
    self.fc = nn.Linear(1024, num_classes)
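
# Usage sketch (illustrative, not from the repo): this is a GoogLeNet-style
# backbone where t_pool1/2/3 halve self.num_frames before the 3x/4x/5x
# Inception stages, so a 16-frame clip builds those stages (and their
# temporal modules) for 8, 4 and 2 frames. The class name `GoogLeNet` below
# is an assumption for illustration only.
#
#   model = GoogLeNet(num_frames=16, dropout=0.5, num_classes=400)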