Example 1
    def __init__(self, bottleneck2d):
        """Inflates a 2D ResNet bottleneck block into a 3D one by inflating
        its convolution, batch-norm and downsample layers."""
        super(Bottleneck3d, self).__init__()

        spatial_stride = bottleneck2d.conv2.stride[0]

        self.conv1 = inflate.inflate_conv(
            bottleneck2d.conv1, time_dim=1, center=True)
        self.bn1 = inflate.inflate_batch_norm(bottleneck2d.bn1)

        self.conv2 = inflate.inflate_conv(
            bottleneck2d.conv2,
            time_dim=3,
            time_padding=1,
            time_stride=spatial_stride,
            center=True)
        self.bn2 = inflate.inflate_batch_norm(bottleneck2d.bn2)

        self.conv3 = inflate.inflate_conv(
            bottleneck2d.conv3, time_dim=1, center=True)
        self.bn3 = inflate.inflate_batch_norm(bottleneck2d.bn3)

        self.relu = torch.nn.ReLU(inplace=True)

        if bottleneck2d.downsample is not None:
            self.downsample = inflate_downsample(
                bottleneck2d.downsample, time_stride=spatial_stride)
        else:
            self.downsample = None

        self.stride = bottleneck2d.stride
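
For context, a hedged usage sketch (not part of the original example): assuming Bottleneck3d is the class whose constructor is shown above and that the inflate helpers are importable, a single 2D bottleneck from a torchvision ResNet could be inflated as follows.

import torch
import torchvision

# Hypothetical usage: inflate the first bottleneck of layer1 of a 2D ResNet-50.
# Bottleneck3d and the inflate module are assumed to be defined as in the snippet.
resnet2d = torchvision.models.resnet50()
block3d = Bottleneck3d(resnet2d.layer1[0])

# Video-style input of shape (batch, channels, time, height, width).
clip = torch.rand(1, 64, 8, 56, 56)
# out = block3d(clip)  # requires the class's forward(), which this example omits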
Example 2
    def __init__(self, resnet2d, frame_nb=16, class_nb=1000, conv_class=False):
        """
        Args:
            conv_class: Whether to use convolutional layer as classifier to
                adapt to various number of frames
        """
        super(I3ResNet, self).__init__()
        resnet2d = copy.deepcopy(resnet2d)
        self.conv_class = conv_class

        self.conv1 = inflate.inflate_conv(
            resnet2d.conv1, time_dim=3, time_padding=1, center=True)
        self.bn1 = inflate.inflate_batch_norm(resnet2d.bn1)
        self.relu = torch.nn.ReLU(inplace=True)
        self.maxpool = inflate.inflate_pool(
            resnet2d.maxpool, time_dim=3, time_padding=1, time_stride=2)

        self.layer1 = inflate_reslayer(resnet2d.layer1)
        self.layer2 = inflate_reslayer(resnet2d.layer2)
        self.layer3 = inflate_reslayer(resnet2d.layer3)
        self.layer4 = inflate_reslayer(resnet2d.layer4)

        if conv_class:
            self.avgpool = inflate.inflate_pool(resnet2d.avgpool, time_dim=1)
            self.classifier = torch.nn.Conv3d(
                in_channels=2048,
                out_channels=class_nb,
                kernel_size=(1, 1, 1),
                bias=True)
        else:
            final_time_dim = int(math.ceil(frame_nb / 16))
            self.avgpool = inflate.inflate_pool(
                resnet2d.avgpool, time_dim=final_time_dim)
            self.fc = inflate.inflate_linear(resnet2d.fc, 1)
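
A hedged end-to-end sketch (assumptions: I3ResNet is the class whose constructor is shown above, and it defines a forward() that is not included in this example):

import torch
import torchvision

# Inflate a 2D ResNet-50 into a 3D network for 16-frame clips.
# Pretrained 2D weights can be loaded via torchvision's pretrained/weights
# argument, depending on the torchvision version.
frame_nb = 16
resnet2d = torchvision.models.resnet50()
i3resnet = I3ResNet(resnet2d, frame_nb=frame_nb, class_nb=1000)

# Input shape: (batch, channels, time, height, width).
clip = torch.rand(2, 3, frame_nb, 224, 224)
# scores = i3resnet(clip)  # forward() is not part of this snippet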
Example 3
    def __init__(self, denselayer2d, inflate_convs=False):
        super(_DenseLayer3d, self).__init__()

        self.inflate_convs = inflate_convs
        for name, child in denselayer2d.named_children():
            if isinstance(child, torch.nn.BatchNorm2d):
                self.add_module(name, inflate.inflate_batch_norm(child))
            elif isinstance(child, torch.nn.ReLU):
                self.add_module(name, child)
            elif isinstance(child, torch.nn.Conv2d):
                kernel_size = child.kernel_size[0]
                if inflate_convs and kernel_size > 1:
                    # Pad input in the time dimension
                    assert kernel_size % 2 == 1, \
                        'kernel size should be odd but got {}'.format(kernel_size)
                    pad_size = int(kernel_size / 2)
                    pad_time = ReplicationPad3d(
                        (0, 0, 0, 0, pad_size, pad_size))
                    # note: '.' is not allowed in module names in recent PyTorch
                    self.add_module('padding_1', pad_time)
                    # Add time dimension of same dim as the space one
                    self.add_module(name,
                                    inflate.inflate_conv(child, kernel_size))
                else:
                    self.add_module(name, inflate.inflate_conv(child, 1))
            else:
                raise ValueError('{} is not among handled layer types'.format(
                    type(child)))
        self.drop_rate = denselayer2d.drop_rate
Example 4
    def __init__(self, transition2d, inflate_conv=False):
        """
        Inflates a transition layer from transition2d
        """
        super(_Transition3d, self).__init__()
        for name, layer in transition2d.named_children():
            if isinstance(layer, torch.nn.BatchNorm2d):
                self.add_module(name, inflate.inflate_batch_norm(layer))
            elif isinstance(layer, torch.nn.ReLU):
                self.add_module(name, layer)
            elif isinstance(layer, torch.nn.Conv2d):
                if inflate_conv:
                    pad_time = ReplicationPad3d((0, 0, 0, 0, 1, 1))
                    # note: '.' is not allowed in module names in recent PyTorch
                    self.add_module('padding_1', pad_time)
                    self.add_module(name, inflate.inflate_conv(layer, 3))
                else:
                    self.add_module(name, inflate.inflate_conv(layer, 1))
            elif isinstance(layer, torch.nn.AvgPool2d):
                self.add_module(name, inflate.inflate_pool(layer, 2))
            else:
                raise ValueError('{} is not among handled layer types'.format(
                    type(layer)))
Example 5
def inflate_features(features, inflate_block_convs=False):
    """
    Inflates the feature extractor part of DenseNet by adding the corresponding
    inflated modules and transfering the inflated weights
    """
    features3d = torch.nn.Sequential()
    transition_nb = 0  # Count number of transition layers
    for layer_idx, (name, child) in enumerate(features.named_children()):
        if layer_idx > 1:
            if isinstance(child, torch.nn.BatchNorm2d):
                features3d.add_module(name, inflate.inflate_batch_norm(child))
            elif isinstance(child, torch.nn.ReLU):
                features3d.add_module(name, child)
            elif isinstance(child, torch.nn.Conv2d):
                features3d.add_module(name, inflate.inflate_conv(child, 1))
            elif isinstance(child, torch.nn.MaxPool2d) or isinstance(
                    child, torch.nn.AvgPool2d):
                features3d.add_module(name, inflate.inflate_pool(child))
            elif isinstance(child, torchvision.models.densenet._DenseBlock):
                # Add dense block
                block = torch.nn.Sequential()
                for nested_name, nested_child in child.named_children():
                    assert isinstance(nested_child,
                                      torchvision.models.densenet._DenseLayer)
                    block.add_module(
                        nested_name,
                        _DenseLayer3d(nested_child,
                                      inflate_convs=inflate_block_convs))
                features3d.add_module(name, block)
            elif isinstance(child, torchvision.models.densenet._Transition):
                features3d.add_module(name, _Transition3d(child))
                transition_nb = transition_nb + 1
            else:
                raise ValueError('{} is not among handled layer types'.format(
                    type(child)))
    return features3d, transition_nb
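
A hedged usage sketch for the DenseNet path (assumptions: inflate_features, _DenseLayer3d and _Transition3d live in the same module):

import torch
import torchvision

# Inflate the feature extractor of a 2D DenseNet-121.
densenet2d = torchvision.models.densenet121()
features3d, transition_nb = inflate_features(
    densenet2d.features, inflate_block_convs=False)
print(transition_nb)  # number of transition layers that were inflated

# Note: children with index 0 and 1 (typically conv0 and norm0) are skipped by
# inflate_features, so they have to be inflated separately before features3d
# can be applied to a (batch, channels, time, height, width) clip.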
Example 6
def inflate_downsample(downsample2d, time_stride=1):
    downsample3d = torch.nn.Sequential(
        inflate.inflate_conv(
            downsample2d[0], time_dim=1, time_stride=time_stride, center=True),
        inflate.inflate_batch_norm(downsample2d[1]))
    return downsample3d
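
All of these snippets rely on an inflate helper module whose implementation is not shown. As a point of reference only, here is a minimal sketch of what an inflate_conv helper could look like, following the usual I3D recipe (repeat the 2D kernel along time and rescale, or place it only in the central temporal slice when center=True); the actual module may differ in its details.

import torch

def inflate_conv(conv2d, time_dim=3, time_padding=0, time_stride=1, center=False):
    """Sketch: turn a Conv2d into a Conv3d by copying its kernel along time."""
    conv3d = torch.nn.Conv3d(
        conv2d.in_channels,
        conv2d.out_channels,
        kernel_size=(time_dim,) + conv2d.kernel_size,
        padding=(time_padding,) + conv2d.padding,
        stride=(time_stride,) + conv2d.stride,
        bias=conv2d.bias is not None)
    weight_2d = conv2d.weight.data  # (out, in, kh, kw)
    if center:
        # Put the 2D weights only in the central temporal slice (zeros elsewhere),
        # so the inflated filter initially ignores neighbouring frames.
        weight_3d = torch.zeros(
            weight_2d.shape[0], weight_2d.shape[1], time_dim,
            weight_2d.shape[2], weight_2d.shape[3])
        weight_3d[:, :, time_dim // 2] = weight_2d
    else:
        # Repeat the 2D weights over time and rescale, so a clip made of a single
        # repeated frame produces the same activations as the original 2D network.
        weight_3d = weight_2d.unsqueeze(2).repeat(1, 1, time_dim, 1, 1) / time_dim
    conv3d.weight.data.copy_(weight_3d)
    if conv2d.bias is not None:
        conv3d.bias.data.copy_(conv2d.bias.data)
    return conv3d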