Code Example #1
    def __init__(self):
        super(MIL, self).__init__()
        h1, w1 = 200, 200
        h2, w2 = calcHW(h1, w1, kernel_size=8, stride=2)
        h3, w3 = calcHW(h2, w2, kernel_size=4, stride=2)
        h4, w4 = calcHW(h3, w3, kernel_size=4, stride=2)

        self.features = MetaSequential(
            MetaConv2d(in_channels=3, out_channels=32, kernel_size=8,
                       stride=2), MetaLayerNorm([32, h2, w2]), nn.ReLU(),
            MetaConv2d(in_channels=32,
                       out_channels=64,
                       kernel_size=4,
                       stride=2), MetaLayerNorm([64, h3, w3]), nn.ReLU(),
            MetaConv2d(in_channels=64,
                       out_channels=64,
                       kernel_size=4,
                       stride=2), MetaLayerNorm([64, h4, w4]), nn.ReLU(),
            SpatialSoftmax(h4, w4))

        self.policy = MetaSequential(
            MetaLinear(2 * 64 + 3, 128),
            nn.ReLU(),
            MetaLinear(128, 128),
            nn.ReLU(),
            MetaLinear(128, 128),
            nn.ReLU(),
            MetaLinear(128, 4),
        )
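
Example #1 depends on a `calcHW` helper that the snippet does not show. A minimal sketch of what it presumably computes, assuming the standard convolution output-size formula with the `MetaConv2d` defaults (padding=0, dilation=1):

def calcHW(h, w, kernel_size, stride, padding=0):
    # Hypothetical reconstruction of the helper used above: standard
    # convolution-arithmetic output size, per spatial dimension.
    h_out = (h + 2 * padding - kernel_size) // stride + 1
    w_out = (w + 2 * padding - kernel_size) // stride + 1
    return h_out, w_out

With a 200x200 input this gives 97x97 after the first conv, consistent with the shapes fed to MetaLayerNorm above.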
Code Example #2
def conv3x3(in_channels, out_channels, ksize=1, stride=None, **kwargs):
    if stride is None:
        return MetaSequential(
            MetaConv2d(in_channels,
                       out_channels,
                       kernel_size=ksize,
                       padding=1,
                       **kwargs),
            MetaBatchNorm2d(out_channels,
                            momentum=1.,
                            track_running_stats=False), nn.ReLU(),
            nn.MaxPool2d(2))
    else:
        return MetaSequential(
            MetaConv2d(in_channels,
                       out_channels,
                       stride=[stride, stride],
                       kernel_size=ksize,
                       padding=0,
                       **kwargs),
            MetaBatchNorm2d(out_channels,
                            momentum=1.,
                            track_running_stats=False),
            nn.ReLU(),
        )
Code Example #3
    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = MetaConv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = MetaBatchNorm2d(planes)
        self.conv2 = MetaConv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = MetaBatchNorm2d(planes)

        self.shortcut = MetaSequential()
        if stride != 1 or in_planes != self.expansion*planes:
            self.shortcut = MetaSequential(
                MetaConv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
                MetaBatchNorm2d(self.expansion*planes)
            )
Code Example #4
    def _make_layer(self,
                    block,
                    planes,
                    stride=1,
                    drop_rate=0.0,
                    drop_block=False,
                    block_size=1):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = MetaSequential(
                MetaConv2d(self.inplanes,
                           planes * block.expansion,
                           kernel_size=1,
                           stride=1,
                           bias=False),
                MetaBatchNorm2d(planes * block.expansion,
                                track_running_stats=False),
            )

        layers = []
        layers.append(
            block(self.inplanes, planes, stride, downsample, drop_rate,
                  drop_block, block_size))
        self.inplanes = planes * block.expansion

        return MetaSequential(*layers)
Code Example #5
def conv1x1(in_planes, out_planes, stride=1):
    """1x1 convolution"""
    return MetaConv2d(in_planes,
                      out_planes,
                      kernel_size=1,
                      stride=stride,
                      bias=False)
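
What distinguishes these Meta* layers from their plain torch.nn counterparts is that their forward pass accepts an explicit parameter dictionary, which is what makes MAML-style inner-loop adaptation possible without mutating the module. A minimal usage sketch, reusing the conv1x1 helper above and assuming the torchmeta package these snippets appear to build on:

import torch
from collections import OrderedDict

conv = conv1x1(in_planes=3, out_planes=8)
x = torch.randn(1, 3, 32, 32)

out = conv(x)  # no params given: uses the module's own weights
# Supply adapted parameters explicitly (keys follow named_parameters()):
adapted = OrderedDict(weight=torch.zeros_like(conv.weight))
out_adapted = conv(x, params=adapted)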
Code Example #6
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 padding,
                 dilation=1,
                 groups=1,
                 bias=False,
                 use_bn=True,
                 bn_eps=1e-5,
                 activation=(lambda: nn.ReLU(inplace=True))):
        super(MetaConvBlock, self).__init__()
        self.activate = (activation is not None)
        self.use_bn = use_bn

        self.conv = MetaConv2d(in_channels=in_channels,
                               out_channels=out_channels,
                               kernel_size=kernel_size,
                               stride=stride,
                               padding=padding,
                               dilation=dilation,
                               groups=groups,
                               bias=bias)
        if self.use_bn:
            self.bn = MetaBatchNorm2d(num_features=out_channels, eps=bn_eps)
        if self.activate:
            self.activ = get_activation_layer(activation)
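
`get_activation_layer` is not shown in this snippet; given the default argument `(lambda: nn.ReLU(inplace=True))`, it presumably turns either a factory callable or a ready-made module into an activation layer. A hypothetical sketch:

import torch.nn as nn

def get_activation_layer(activation):
    # Hypothetical helper: pass module instances through unchanged,
    # otherwise treat the argument as a zero-argument factory, as in
    # the default (lambda: nn.ReLU(inplace=True)).
    if isinstance(activation, nn.Module):
        return activation
    return activation()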
Code Example #7
    def __init__(self,
                 block=BasicBlock,
                 layers=[2, 2, 2, 2],
                 zero_init_residual=False):
        super(ResNet, self).__init__()
        self.in_planes = 64
        self.conv1 = MetaConv2d(3,
                                64,
                                kernel_size=3,
                                stride=1,
                                padding=1,
                                bias=False)
        self.bn1 = MetaBatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        # self.avgpool = nn.AdaptiveAvgPool2d((1, 1))

        self._init_conv()

        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)
Code Example #8
def conv3x3(in_planes, out_planes, stride=1):
    return MetaConv2d(in_planes,
                      out_planes,
                      kernel_size=3,
                      stride=stride,
                      padding=1,
                      bias=False)
Code Example #9
def conv_block(in_channels,
               out_channels,
               bias=True,
               activation=nn.ReLU(inplace=True),
               use_dropout=False,
               p=0.1):
    res = MetaSequential(
        OrderedDict([
            ('conv',
             MetaConv2d(int(in_channels),
                        int(out_channels),
                        kernel_size=3,
                        padding=1,
                        bias=bias)),
            ('norm',
             MetaBatchNorm2d(int(out_channels),
                             momentum=1.,
                             track_running_stats=False)),
            ('relu', activation),
            ('pool', nn.MaxPool2d(2)),
        ]))

    if use_dropout:
        res.add_module('dropout', nn.Dropout2d(p))

    return res
Code Example #10
    def __init__(
        self, feature_scale=4, n_classes=1, is_deconv=True, in_channels=3, padding=1, device='cpu'
    ):
        super(ResUnet, self).__init__()
        self.is_deconv = is_deconv
        self.in_channels = in_channels
        self.feature_scale = feature_scale

        filters = [64, 128, 256, 512, 1024]
        filters = [int(x / self.feature_scale) for x in filters]

        # Downsampling
        self.conv1 = ResUnetConv2(self.in_channels, filters[0], device=device, padding=padding, is_first=True)
        self.maxpool1 = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)
        self.conv2 = ResUnetConv2(filters[0], filters[1], device=device, padding=padding)
        self.maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)
        self.conv3 = ResUnetConv2(filters[1], filters[2], device=device, padding=padding)
        self.maxpool3 = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)
        self.center = ResUnetConv2(filters[2], filters[3], device=device, padding=padding)

        # Upsampling
        self.up_concat3 = ResUnetUp(filters[3], filters[2], self.is_deconv)
        self.up_concat2 = ResUnetUp(filters[2], filters[1], self.is_deconv)
        self.up_concat1 = ResUnetUp(filters[1], filters[0], self.is_deconv)

        # Final convolution layer (without any concat)
        self.final = MetaConv2d(filters[0], n_classes, kernel_size=1)
Code Example #11
def conv_block(in_channels, out_channels, **kwargs):
    return MetaSequential(
        OrderedDict([('conv', MetaConv2d(in_channels, out_channels, **kwargs)),
                     ('norm',
                      nn.BatchNorm2d(out_channels,
                                     momentum=1.,
                                     track_running_stats=False)),
                     ('relu', nn.ReLU()), ('pool', nn.MaxPool2d(2))]))
Code Example #12
File: conv4.py  Project: remiMZ/PMAML-ECAI20
def conv3x3(in_channels, out_channels, **kwargs):
    return MetaSequential(
        MetaConv2d(in_channels,
                   out_channels,
                   kernel_size=3,
                   padding=1,
                   **kwargs),
        MetaBatchNorm2d(out_channels, momentum=1., track_running_stats=False),
        nn.ReLU(), nn.MaxPool2d(2))
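
Blocks like this one are the standard building unit of the Conv-4 embedding common in few-shot learning. A hedged usage sketch (the four-layer, 64-channel layout is the conventional choice, not taken from this file):

# Hypothetical Conv-4 feature extractor assembled from the block above.
features = MetaSequential(
    conv3x3(3, 64),
    conv3x3(64, 64),
    conv3x3(64, 64),
    conv3x3(64, 64),
)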
Code Example #13
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """3x3 convolution with padding"""
    return MetaConv2d(in_planes,
                      out_planes,
                      kernel_size=3,
                      stride=stride,
                      padding=dilation,
                      groups=groups,
                      bias=False,
                      dilation=dilation)
Code Example #14
    def __init__(self, nc, num_classes, block, num_blocks):
        super(ResNet, self).__init__()
        self.in_planes = 64
        self.conv1 = MetaConv2d(nc, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = MetaBatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = MetaLinear(512 * block.expansion, num_classes)
Code Example #15
    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 downsample=None,
                 drop_rate=0.0,
                 drop_block=False,
                 block_size=1):
        super(BasicBlock, self).__init__()
        self.conv1 = MetaConv2d(inplanes,
                                planes,
                                kernel_size=3,
                                stride=1,
                                padding=1,
                                bias=False)
        self.bn1 = MetaBatchNorm2d(planes, track_running_stats=False)
        self.relu1 = nn.LeakyReLU(0.1)
        self.conv2 = MetaConv2d(planes,
                                planes,
                                kernel_size=3,
                                stride=1,
                                padding=1,
                                bias=False)
        self.bn2 = MetaBatchNorm2d(planes, track_running_stats=False)
        self.relu2 = nn.LeakyReLU(0.1)
        self.conv3 = MetaConv2d(planes,
                                planes,
                                kernel_size=3,
                                stride=1,
                                padding=1,
                                bias=False)
        self.bn3 = MetaBatchNorm2d(planes, track_running_stats=False)
        self.relu3 = nn.LeakyReLU(0.1)
        self.maxpool = nn.MaxPool2d(stride)
        self.downsample = downsample
        self.stride = stride
        self.drop_rate = drop_rate
        self.num_batches_tracked = 0
        self.drop_block = drop_block
        self.block_size = block_size
        self.DropBlock = DropBlock(block_size=self.block_size)
Code Example #16
    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1,
                 padding=1, final=False, device='cpu', is_first=False):
        super(ResUnetConv2, self).__init__()
        self.device = device

        def init_layers(m):
            if type(m) == MetaConv2d:
                torch.nn.init.kaiming_uniform_(m.weight, nonlinearity='relu')

        if is_first:
            self.double_conv = MetaSequential(OrderedDict([
                ('conv1', MetaConv2d(in_channels, out_channels,
                                     kernel_size=kernel_size, stride=stride,
                                     padding=padding)),
                ('norm1', nn.BatchNorm2d(out_channels)),
                ('relu1', nn.ReLU(inplace=True)),
                ('conv2', MetaConv2d(out_channels, out_channels,
                                     kernel_size=kernel_size, stride=stride,
                                     padding=padding)),
            ]))
        else:
            self.double_conv = MetaSequential(OrderedDict([
                ('norm1', nn.BatchNorm2d(in_channels)),
                ('relu1', nn.ReLU(inplace=True)),
                ('conv1', MetaConv2d(in_channels, out_channels,
                                     kernel_size=kernel_size, stride=stride,
                                     padding=padding)),
                ('norm2', nn.BatchNorm2d(out_channels)),
                ('relu2', nn.ReLU(inplace=True)),
                ('conv2', MetaConv2d(out_channels, out_channels,
                                     kernel_size=kernel_size, stride=stride,
                                     padding=padding)),
            ]))

        self.addition_connection = MetaSequential(OrderedDict([
            ('conv1', MetaConv2d(in_channels, out_channels,
                                 kernel_size=kernel_size, stride=stride,
                                 padding=padding)),
            ('norm2', nn.BatchNorm2d(out_channels)),
        ]))

        # Use the module's apply function to recursively apply the initialization.
        self.double_conv.apply(init_layers)
Code Example #17
def conv_block(in_channels, out_channels, no_max_pool, **kwargs):
    stride = 2 if no_max_pool else 1
    model = OrderedDict([('conv',
                          MetaConv2d(in_channels,
                                     out_channels,
                                     stride=stride,
                                     **kwargs)),
                         ('norm',
                          nn.BatchNorm2d(out_channels,
                                         momentum=1.,
                                         track_running_stats=False)),
                         ('relu', nn.ReLU())])

    if not no_max_pool:
        model['pool'] = nn.MaxPool2d(2)

    return MetaSequential(model)
Code Example #18
    def __init__(self,
                 input_shape,
                 Nhid=[1],
                 Mhid=[128],
                 out_channels=1,
                 kernel_size=[7],
                 stride=[1],
                 pool_size=[2],
                 alpha=[.9],
                 beta=[.85],
                 alpharp=[.65],
                 dropout=[0.5],
                 num_conv_layers=2,
                 num_mlp_layers=1,
                 deltat=1000,
                 lc_ampl=.5,
                 lif_layer_type = LIFLayer,
                 method='rtrl',
                 with_output_layer = True):

        # Fall back to empty lists before they are used below.
        if Nhid is None:
            self.Nhid = Nhid = []
        if Mhid is None:
            self.Mhid = Mhid = []

        self.with_output_layer = with_output_layer
        if with_output_layer:
            Mhid += [out_channels]
            num_mlp_layers += 1
        self.num_layers = num_layers = num_conv_layers + num_mlp_layers

        # If only one value is provided, it is duplicated for each layer.
        if len(kernel_size) == 1:
            kernel_size = kernel_size * num_conv_layers
        if stride is None:
            stride = [1]
        if len(stride) == 1:
            stride = stride * num_conv_layers
        if pool_size is None:
            pool_size = [1]
        if len(pool_size) == 1:
            pool_size = pool_size * num_conv_layers
        if len(alpha) == 1:
            alpha = alpha * num_layers
        if len(alpharp) == 1:
            alpharp = alpharp * num_layers
        if len(beta) == 1:
            beta = beta * num_layers
        if len(dropout) == 1:
            self.dropout = dropout = dropout * num_layers


        super(MetaLenetDECOLLE, self).__init__()

        # Computing padding to preserve feature size
        padding = (np.array(kernel_size) - 1) // 2  # TODO try to remove padding

        # The following lists need to be nn.ModuleList in order for PyTorch to
        # properly load and save the state_dict.
        self.pool_layers = nn.ModuleList()
        self.dropout_layers = nn.ModuleList()
        self.input_shape = input_shape
        Nhid = [input_shape[0]] + Nhid
        self.num_conv_layers = num_conv_layers
        self.num_mlp_layers = num_mlp_layers

        feature_height = self.input_shape[1]
        feature_width = self.input_shape[2]

        for i in range(self.num_conv_layers):
            feature_height, feature_width = get_output_shape(
                [feature_height, feature_width],
                kernel_size=kernel_size[i],
                stride=stride[i],
                padding=padding[i],
                dilation=1)
            feature_height //= pool_size[i]
            feature_width //= pool_size[i]
            base_layer = MetaConv2d(Nhid[i], Nhid[i + 1], kernel_size[i], stride[i], padding[i])
            layer = lif_layer_type(base_layer,
                                   alpha=alpha[i],
                                   beta=beta[i],
                                   alpharp=alpharp[i],
                                   deltat=deltat,
                                   do_detach=(method == 'rtrl'))
            pool = nn.MaxPool2d(kernel_size=pool_size[i])
            readout = MetaLinear(int(feature_height * feature_width * Nhid[i + 1]), out_channels)

            # Readout layer has random fixed weights
            for param in readout.parameters():
                param.requires_grad = False
            self.reset_lc_parameters(readout, lc_ampl)

            dropout_layer = nn.Dropout(dropout[i])

            self.LIF_layers.append(layer)
            self.pool_layers.append(pool)
            self.readout_layers.append(readout)
            self.dropout_layers.append(dropout_layer)

        mlp_in = int(feature_height * feature_width * Nhid[-1])
        Mhid = [mlp_in] + Mhid
        for i in range(num_mlp_layers):
            base_layer = MetaLinear(Mhid[i], Mhid[i+1])
            layer = lif_layer_type(base_layer,
                                   alpha=alpha[i],
                                   beta=beta[i],
                                   alpharp=alpharp[i],
                                   deltat=deltat,
                                   do_detach=(method == 'rtrl'))

            if self.with_output_layer and i+1==num_mlp_layers:
                readout = nn.Identity()
                dropout_layer = nn.Identity()
            else:
                readout = MetaLinear(Mhid[i+1], out_channels)
                # Readout layer has random fixed weights
                for param in readout.parameters():
                    param.requires_grad = False
                self.reset_lc_parameters(readout, lc_ampl)
                dropout_layer = nn.Dropout(dropout[self.num_conv_layers+i])

            self.LIF_layers.append(layer)
            self.pool_layers.append(nn.Sequential())
            self.readout_layers.append(readout)
            self.dropout_layers.append(dropout_layer)
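
Example #18 also relies on a `get_output_shape` helper that is not shown. A hypothetical sketch, assuming it applies the standard convolution output-size formula to each spatial dimension:

def get_output_shape(input_shape, kernel_size, stride, padding, dilation):
    # Hypothetical reconstruction: per-dimension convolution output size.
    return [
        (size + 2 * padding - dilation * (kernel_size - 1) - 1) // stride + 1
        for size in input_shape
    ]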
Code Example #19
def conv1x1(in_planes, out_planes, stride=1):
    return MetaConv2d(in_planes,
                      out_planes,
                      kernel_size=1,
                      stride=stride,
                      bias=False)
Code Example #20
    def __init__(self,
                 block,
                 layers,
                 num_classes=1000,
                 zero_init_residual=False,
                 groups=1,
                 width_per_group=64,
                 replace_stride_with_dilation=None,
                 norm_layer=None):
        super(ResNet, self).__init__()
        if norm_layer is None:
            norm_layer = MetaBatchNorm2d
        self._norm_layer = norm_layer

        self.inplanes = 64
        self.dilation = 1
        if replace_stride_with_dilation is None:
            # each element in the tuple indicates if we should replace
            # the 2x2 stride with a dilated convolution instead
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError("replace_stride_with_dilation should be None "
                             "or a 3-element tuple, got {}".format(
                                 replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        self.conv1 = MetaConv2d(3,
                                self.inplanes,
                                kernel_size=7,
                                stride=2,
                                padding=3,
                                bias=False)
        # self.conv1 = MetaConv2d(3, self.inplanes, kernel_size=3, stride=1, padding=1,
        #                        bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block,
                                       128,
                                       layers[1],
                                       stride=2,
                                       dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block,
                                       256,
                                       layers[2],
                                       stride=2,
                                       dilate=replace_stride_with_dilation[1])
        self.layer4 = self._make_layer(block,
                                       512,
                                       layers[3],
                                       stride=2,
                                       dilate=replace_stride_with_dilation[2])
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = MetaLinear(512 * block.expansion, num_classes)

        for m in self.modules():
            if isinstance(m, MetaConv2d):
                nn.init.kaiming_normal_(m.weight,
                                        mode='fan_out',
                                        nonlinearity='relu')
            elif isinstance(m, (MetaBatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)
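
Meta-networks like this ResNet are typically consumed by a MAML-style inner loop. A sketch using torchmeta's gradient_update_parameters, assuming the model is a torchmeta MetaModule whose forward threads the params dict through its submodules; the task tensors here are illustrative placeholders:

import torch
import torch.nn.functional as F
from torchmeta.utils.gradient_based import gradient_update_parameters

model = ResNet(BasicBlock, [2, 2, 2, 2], num_classes=5)
# Illustrative 5-way support/query data for one task.
support_x, support_y = torch.randn(25, 3, 84, 84), torch.randint(5, (25,))
query_x = torch.randn(75, 3, 84, 84)

inner_loss = F.cross_entropy(model(support_x), support_y)
# One inner-loop SGD step; returns an OrderedDict of adapted parameters.
params = gradient_update_parameters(model, inner_loss, step_size=0.4)
query_logits = model(query_x, params=params)  # the outer loss is computed on these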