def _make_layer(block,
                    out_channels,
                    blocks,
                    stride=1,
                    in_channels=1,
                    bn=True,
                    convmap_channels=None):
        # the first block needs a resampling shortcut if the stride or channel count changes
        resample1 = None
        if stride != 1 or in_channels != out_channels * block.expansion:
            resample1 = nn.Sequential(
                models.resnet.conv1x1(in_channels,
                                      out_channels * block.expansion, stride),
                MaybeModule(bn,
                            nn.BatchNorm2d(out_channels * block.expansion)))
        layers = [block(in_channels, out_channels, stride, resample1, bn=bn)]

        # in between, "normal" blocks are inserted (no resampling needed)
        in_channels = out_channels * block.expansion
        for _ in range(1, blocks - 1):
            layers.append(block(in_channels, out_channels, bn=bn))

        # the last block adapts to the number of channels requested for the output
        resampleN = None
        if convmap_channels and convmap_channels != out_channels:
            out_channels = convmap_channels
            resampleN = nn.Sequential(
                models.resnet.conv1x1(in_channels, out_channels),
                MaybeModule(bn, nn.BatchNorm2d(out_channels)))
        layers.append(
            block(in_channels, out_channels, 1, resampleN, bn=bn,
                  expand=False))

        return nn.Sequential(*layers)
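
All of these snippets lean on a MaybeModule helper that is never shown. A minimal sketch consistent with how it is called here (positionally as MaybeModule(bn, layer) and by keyword as MaybeModule(maybe=..., layer=...)) might look like this; the real implementation lives elsewhere in the codebase:

import torch.nn as nn

class MaybeModule(nn.Module):
    # Sketch only: applies `layer` when `maybe` is truthy, otherwise acts
    # as the identity, so e.g. BatchNorm can be toggled by a flag.
    def __init__(self, maybe=False, layer=None):
        super().__init__()
        self.maybe = maybe
        self.layer = layer

    def forward(self, x):
        if self.maybe and self.layer is not None:
            return self.layer(x)
        return x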
Example #2
    @classmethod
    def from_spec(cls, spec):
        features = load_with_spec(spec['conv'], module_by_kind)
        hidden = load_with_spec(spec['hidden'], module_by_kind)
        output = load_with_spec(spec['output'], module_by_kind)

        use_fc_from_conv = spec.get('use_fc_from_conv', False)
        # adapter: 1x1 projection from the conv-map channels to the hidden feature dim
        if use_fc_from_conv:
            conv_adapter = ConvAdapter(spec['conv']['convmap_channels'],
                                       spec['hidden']['feature_dim'],
                                       bn=spec['conv']['bn'])
        else:
            conv_adapter = None
        fc_from_conv = MaybeModule(maybe=use_fc_from_conv, layer=conv_adapter)

        # adapter: projection from the hidden feature dim to the output head's in_features
        use_fc_from_hidden = spec.get('use_fc_from_hidden', False)
        if use_fc_from_hidden:
            transformer_adapter = TransformerAdapter(
                spec['hidden']['feature_dim'],
                spec['output']['in_features'],
                bn=False)
        else:
            transformer_adapter = None
        fc_from_hidden = MaybeModule(maybe=use_fc_from_hidden,
                                     layer=transformer_adapter)

        return cls(features, hidden, output, fc_from_conv, fc_from_hidden)
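
For orientation, a spec dict that would satisfy the lookups in from_spec could look like the following. Only the keys actually read above are certain; the 'kind' entries and concrete values are assumptions for illustration:

spec = {
    'conv':   {'kind': 'resnet', 'convmap_channels': 128, 'bn': True},
    'hidden': {'kind': 'transformer', 'feature_dim': 256},
    'output': {'kind': 'linear', 'in_features': 256},
    'use_fc_from_conv': True,     # inserts a ConvAdapter between conv and hidden
    'use_fc_from_hidden': False,  # no TransformerAdapter before the output head
}
model = SomeModel.from_spec(spec)  # SomeModel stands in for the actual class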
    def __init__(self, inplanes, planes, stride=1, resample=None, bn=True, expand=True):
        super(BasicBlock, self).__init__(resample=resample)

        self.conv = nn.Sequential(
            # first conv -> (optional) BN -> activation
            models.resnet.conv3x3(inplanes, planes, stride),
            MaybeModule(bn, nn.BatchNorm2d(planes)),
            nn.LeakyReLU(inplace=True),
            # second conv -> (optional) BN; the activation follows the residual sum
            models.resnet.conv3x3(planes, planes),
            MaybeModule(bn, nn.BatchNorm2d(planes)),
        )
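
BasicBlock hands resample to its parent instead of calling nn.Module directly, which implies a shared residual base class that owns the skip connection. A plausible sketch of that base, assuming the usual post-sum activation (the actual base class is not shown):

import torch.nn as nn
import torch.nn.functional as F

class ResampledResidual(nn.Module):  # hypothetical name
    def __init__(self, resample=None):
        super().__init__()
        self.resample = resample  # optional 1x1 conv path for the identity branch

    def forward(self, x):
        identity = self.resample(x) if self.resample is not None else x
        out = self.conv(x)  # `self.conv` is defined by subclasses such as BasicBlock
        return F.leaky_relu(out + identity, inplace=True)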
def resnet_model_creator(in_channels=1, convmap_channels=128, blocks=1, conf='18', bn=True):
    # each conf maps to a 4-tuple (block class, per-layer output channels,
    # blocks per layer, per-layer strides) defined elsewhere in the module
    block_class, blocks_out_channels, blocks_in_layer, stride_in_layer = {
        '18': resnet18,
        '34': resnet34,
        '50': resnet50,
        '101': resnet101,
        '152': resnet152,
    }[conf]

    pre_channels = 64
    pre_layers = nn.Sequential(
        nn.Conv2d(in_channels, pre_channels, kernel_size=7, stride=2, padding=3, bias=False),
        MaybeModule(bn, nn.BatchNorm2d(pre_channels)),
        nn.LeakyReLU(inplace=True),
        nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
    )
    resnet_feat = [pre_layers]

    def _make_layer(block, out_channels, blocks, stride=1, in_channels=1, bn=True, convmap_channels=None):
        # the first block needs a resampling shortcut if the stride or channel count changes
        resample1 = None
        if stride != 1 or in_channels != out_channels * block.expansion:
            resample1 = nn.Sequential(
                models.resnet.conv1x1(in_channels, out_channels * block.expansion, stride),
                MaybeModule(bn, nn.BatchNorm2d(out_channels * block.expansion)))
        layers = [block(in_channels, out_channels, stride, resample1, bn=bn)]

        # in between, "normal" blocks are inserted (no resampling needed)
        in_channels = out_channels * block.expansion
        for _ in range(1, blocks - 1):
            layers.append(block(in_channels, out_channels, bn=bn))

        # the last block adapts to the number of channels requested for the output
        resampleN = None
        if convmap_channels and convmap_channels != out_channels:
            out_channels = convmap_channels
            resampleN = nn.Sequential(
                models.resnet.conv1x1(in_channels, out_channels),
                MaybeModule(bn, nn.BatchNorm2d(out_channels)))
        layers.append(block(in_channels, out_channels, 1, resampleN, bn=bn, expand=False))

        return nn.Sequential(*layers)

    # Build the requested number of resnet blocks
    assert 1 <= blocks <= 4, 'number of blocks requested for ResNet model should be 1, 2, 3, or 4'

    in_channels = pre_channels
    for block_idx, out_channels, numblocks, stride in zip(
            range(0, blocks), blocks_out_channels, blocks_in_layer, stride_in_layer):

        convmap_channels_supplied = convmap_channels if block_idx == blocks - 1 else None
        layer = _make_layer(block_class, out_channels, numblocks, stride,
                            in_channels=in_channels, convmap_channels=convmap_channels_supplied, bn=bn)

        in_channels = out_channels * block_class.expansion
        resnet_feat.append(layer)

    return nn.Sequential(*resnet_feat)
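
A quick smoke test of the factory, assuming each conf entry (resnet18, resnet34, ...) is a 4-tuple of (block class, per-layer output channels, blocks per layer, per-layer strides) as unpacked above:

import torch

feat = resnet_model_creator(in_channels=1, convmap_channels=128, blocks=2, conf='18')
x = torch.randn(4, 1, 224, 224)  # (batch, channels, height, width)
convmap = feat(x)                # the last layer is resampled to 128 channels
print(convmap.shape)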
def _make_layers(cfg, batch_norm=False, in_channels=1):
    layers = []
    for v in cfg:
        if v == 'M':
            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
        else:
            layers += [
                nn.Conv2d(in_channels, v, kernel_size=3, padding=1),
                MaybeModule(batch_norm, nn.BatchNorm2d(v)),
                nn.LeakyReLU(inplace=True)
            ]
            in_channels = v
    return nn.Sequential(*layers)
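
The cfg list follows the familiar VGG convention: an integer adds a 3x3 conv of that width, 'M' adds a 2x2 max-pool. For example, the standard VGG-11 configuration (the concrete cfg is an illustration, not taken from this code):

cfg11 = [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M']
features = _make_layers(cfg11, batch_norm=True, in_channels=1)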
Example #6
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 bn=True,
                 conv_class=nn.Conv1d):
        super().__init__()
        bn_class = {
            nn.Conv1d: nn.BatchNorm1d,
            nn.Conv2d: nn.BatchNorm2d
        }[conv_class]
        modules = (conv_class(in_channels, out_channels, kernel_size),
                   MaybeModule(bn, bn_class(out_channels)),
                   nn.LeakyReLU(inplace=True))
        self.block = nn.Sequential(*modules)
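
Because the BatchNorm class is looked up from conv_class, the same block definition serves 1-D and 2-D data. A hypothetical usage, with ConvBlock standing in for the unnamed enclosing class:

import torch.nn as nn

block1d = ConvBlock(64, 128, kernel_size=3)                         # Conv1d + BatchNorm1d
block2d = ConvBlock(64, 128, kernel_size=3, conv_class=nn.Conv2d)   # Conv2d + BatchNorm2d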
    def __init__(self, in_features, out_features, normalization='batch_norm', dropout=0.):
        super(_BasicLinear, self).__init__()

        # factories mapping each normalization name to a module constructor
        normalization_modules = {
            'batch_norm': nn.BatchNorm1d,
            'instance_norm': nn.InstanceNorm1d,
            'none': lambda num_features: MaybeModule(False, None)
        }

        self.layer = nn.Sequential(*(
            nn.Linear(in_features, out_features),
            normalization_modules[normalization](out_features),
            nn.LeakyReLU(inplace=True),
            nn.Dropout(dropout)
        ))
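
A usage sketch, assuming a forward that simply applies self.layer (not shown above):

import torch

fc = _BasicLinear(256, 128, normalization='batch_norm', dropout=0.1)
y = fc.layer(torch.randn(8, 256))  # -> shape (8, 128)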