Example #1
0
    def __init__(self, nfilters=None, nclasses=None, nmasks=None, level=None, filter_size=None, linear=128, input_size=28,
                 debug=False, scale_noise=1, act='relu', use_act=False, first_filter_size=None, pool_type=None,
                 dropout=None, unique_masks=False, train_masks=False, noise_type='uniform', mix_maps=None):
        """LeNet-style classifier built from three PerturbLayer + pooling stages.

        :param nfilters: feature maps produced by each PerturbLayer
        :param nclasses: number of output classes
        :param nmasks: noise masks per input channel in each PerturbLayer
        :param level: noise magnitude (scaled by ``scale_noise`` in the first layer)
        :param filter_size: conv filter size inside PerturbLayers; also determines
            the flattened feature size fed to the first linear layer
        :param linear: width of the hidden fully-connected layer
        :param input_size: 28 (1-channel, MNIST-like) or 32 (3-channel, CIFAR-like)
        :param dropout: dropout probability for the classifier head
        :param pool_type: 'max' or 'avg'
        :raises ValueError: for an unsupported ``input_size`` or ``pool_type``
            (the previous code printed a message and returned, leaving the
            module half-constructed or crashing later with NameError).
        """
        super(LeNet, self).__init__()
        # Spatial size of the final feature map: 5x5 when filter_size == 5,
        # otherwise 4x4 (used to size the first linear layer).
        n = 5 if filter_size == 5 else 4

        if input_size == 32:
            first_channels = 3  # RGB input (e.g. CIFAR)
        elif input_size == 28:
            first_channels = 1  # grayscale input (e.g. MNIST)
        else:
            raise ValueError('Input size {} is not supported (expected 28 or 32)'.format(input_size))

        if pool_type == 'max':
            pool = nn.MaxPool2d
        elif pool_type == 'avg':
            pool = nn.AvgPool2d
        else:
            raise ValueError('Pool Type {} is not supported/understood'.format(pool_type))

        # Classifier head components (registration order preserved).
        self.linear1 = nn.Linear(nfilters*n*n, linear)
        self.linear2 = nn.Linear(linear, nclasses)
        self.dropout = nn.Dropout(p=dropout)
        self.act = act_fn(act)
        self.batch_norm = nn.BatchNorm1d(linear)

        # Three perturb+pool stages; resolution halves after each pool,
        # hence input_size, input_size//2, input_size//4.
        self.first_layers = nn.Sequential(
            PerturbLayer(in_channels=first_channels, out_channels=nfilters, nmasks=nmasks, level=level*scale_noise,
                         filter_size=first_filter_size, use_act=use_act, act=act, unique_masks=unique_masks,
                         train_masks=train_masks, noise_type=noise_type, input_size=input_size, mix_maps=mix_maps),
            pool(kernel_size=3, stride=2, padding=1),

            PerturbLayer(in_channels=nfilters, out_channels=nfilters, nmasks=nmasks, level=level, filter_size=filter_size,
                         use_act=True, act=act, unique_masks=unique_masks, debug=debug, train_masks=train_masks,
                         noise_type=noise_type, input_size=input_size//2, mix_maps=mix_maps),
            pool(kernel_size=3, stride=2, padding=1),

            PerturbLayer(in_channels=nfilters, out_channels=nfilters, nmasks=nmasks, level=level, filter_size=filter_size,
                         use_act=True, act=act, unique_masks=unique_masks, train_masks=train_masks, noise_type=noise_type,
                         input_size=input_size//4, mix_maps=mix_maps),
            pool(kernel_size=3, stride=2, padding=1),
        )

        # Classifier: dropout -> linear -> BN -> act -> dropout -> linear.
        self.last_layers = nn.Sequential(
            self.dropout,
            self.linear1,
            self.batch_norm,
            self.act,
            self.dropout,
            self.linear2,
        )
Example #2
0
    def __init__(self,
                 block,
                 nblocks=None,
                 avgpool=None,
                 nfilters=None,
                 nclasses=None,
                 nmasks=None,
                 input_size=32,
                 level=None,
                 filter_size=None,
                 first_filter_size=None,
                 use_act=False,
                 train_masks=False,
                 mix_maps=None,
                 act=None,
                 scale_noise=1,
                 unique_masks=False,
                 debug=False,
                 noise_type=None,
                 pool_type=None):
        """ResNet variant whose stem is a PerturbLayerFirst and whose stages
        are built from perturbation residual blocks.

        :param block: residual block class; its ``expansion`` attribute sizes
            the final linear classifier
        :param nblocks: blocks per stage, e.g. [2, 2, 2, 2] (4 stages)
        :param avgpool: kernel size of the final average pooling
        :param nfilters: base feature-map count (x2/x4/x8 in later stages)
        :param nclasses: number of output classes
        :param nmasks: noise masks per input channel in the residual stages
        :param input_size: input image resolution (e.g. 32 for CIFAR)
        :param level: noise magnitude; the stem uses level * scale_noise * 20
        :param filter_size: filter size inside the residual stages
        :param first_filter_size: filter size of the stem layer (7 adds a maxpool)
        :param pool_type: stored on the instance; not used in this constructor
        """
        super(PerturbResNet, self).__init__()
        self.nfilters = nfilters
        self.unique_masks = unique_masks
        self.noise_type = noise_type
        self.train_masks = train_masks
        self.pool_type = pool_type  # kept for external reference; unused below
        self.mix_maps = mix_maps
        self.act = act_fn(act)

        # Stem: perturb the 3 input channels up to 3*nfilters maps, with the
        # noise level boosted 20x relative to the residual stages.
        layers = [
            PerturbLayerFirst(in_channels=3,
                              out_channels=3 * nfilters,
                              nmasks=nfilters * 5,
                              level=level * scale_noise * 20,
                              debug=debug,
                              filter_size=first_filter_size,
                              use_act=use_act,
                              train_masks=train_masks,
                              input_size=input_size,
                              act=act,
                              unique_masks=self.unique_masks,
                              noise_type=self.noise_type,
                              mix_maps=mix_maps)
        ]

        if first_filter_size == 7:
            # Large stem filter: downsample early, as in standard ImageNet ResNets.
            layers.append(nn.MaxPool2d(kernel_size=3, stride=2, padding=1))

        # 1x1 conv maps the 3*nfilters stem outputs back down to nfilters.
        self.pre_layers = nn.Sequential(
            *layers,
            nn.Conv2d(self.nfilters * 3 * 1,
                      self.nfilters,
                      kernel_size=1,
                      stride=1,
                      bias=False),
            nn.BatchNorm2d(self.nfilters),
            self.act)

        # Four residual stages; width doubles and resolution halves from
        # stage 2 onward (input_size passed so masks can be sized correctly).
        self.layer1 = self._make_layer(block,
                                       1 * nfilters,
                                       nblocks[0],
                                       stride=1,
                                       level=level,
                                       nmasks=nmasks,
                                       use_act=True,
                                       filter_size=filter_size,
                                       act=act,
                                       input_size=input_size)
        self.layer2 = self._make_layer(block,
                                       2 * nfilters,
                                       nblocks[1],
                                       stride=2,
                                       level=level,
                                       nmasks=nmasks,
                                       use_act=True,
                                       filter_size=filter_size,
                                       act=act,
                                       input_size=input_size)
        self.layer3 = self._make_layer(block,
                                       4 * nfilters,
                                       nblocks[2],
                                       stride=2,
                                       level=level,
                                       nmasks=nmasks,
                                       use_act=True,
                                       filter_size=filter_size,
                                       act=act,
                                       input_size=input_size // 2)
        self.layer4 = self._make_layer(block,
                                       8 * nfilters,
                                       nblocks[3],
                                       stride=2,
                                       level=level,
                                       nmasks=nmasks,
                                       use_act=True,
                                       filter_size=filter_size,
                                       act=act,
                                       input_size=input_size // 4)
        self.avgpool = nn.AvgPool2d(avgpool, stride=1)
        self.linear = nn.Linear(8 * nfilters * block.expansion, nclasses)
Example #3
0
    def __init__(self,
                 in_channels=None,
                 out_channels=None,
                 nmasks=None,
                 level=None,
                 filter_size=None,
                 debug=False,
                 use_act=False,
                 stride=1,
                 act=None,
                 unique_masks=False,
                 mix_maps=None,
                 train_masks=False,
                 noise_type='uniform',
                 input_size=None):
        """Perturbation layer: either a plain NxN conv block (filter_size > 0)
        or fixed/trainable noise masks followed by a 1x1 conv (filter_size == 0).

        :param nmasks: number of noise masks per input channel
        :param level: noise magnitude
        :param filter_size: 0 -> (perturb, conv_1x1); N > 0 -> plain conv_NxN
        :param use_act: apply activation right after perturbing the input
            (set False for the first layer)
        :param unique_masks: distinct mask sets per input channel (vs shared)
        :param mix_maps: add a second 1x1 conv after perturbation to mix maps
        :param train_masks: treat the noise masks as trainable parameters
        :param noise_type: 'uniform' or 'normal'
        :param input_size: input resolution (28 MNIST / 32 CIFAR), needed to
            build masks of the right spatial size
        """
        super(PerturbLayer, self).__init__()
        self.nmasks = nmasks  # noise masks per input channel
        self.unique_masks = unique_masks  # same set or different sets of nmasks per input channel
        self.train_masks = train_masks  # whether to treat noise masks as regular trainable parameters of the model
        self.level = level  # noise magnitude
        self.filter_size = filter_size  # if filter_size=0, layers=(perturb, conv_1x1) else layers=(conv_NxN), N=filter_size
        self.use_act = use_act  # whether to use activation immediately after perturbing input (set it to False for the first layer)
        self.act = act_fn(
            act)  # relu, prelu, rrelu, elu, selu, tanh, sigmoid (see utils)
        self.debug = debug  # print input, mask, output values for each batch
        self.noise_type = noise_type  # normal or uniform
        self.in_channels = in_channels
        self.input_size = input_size  # input image resolution (28 for MNIST, 32 for CIFAR), needed to construct masks
        self.mix_maps = mix_maps  # whether to apply second 1x1 convolution after perturbation, to mix output feature maps

        # Conv geometry per filter size.
        # NOTE(review): any other positive filter_size falls through and
        # leaves padding/bias unbound (NameError below) — confirm callers
        # only pass 0, 1, 3, 5 or 7.
        if filter_size == 1:
            padding = 0
            bias = True
        elif filter_size == 3 or filter_size == 5:
            padding = 1
            bias = False
        elif filter_size == 7:
            stride = 2  # 7x7 filters also downsample
            padding = 3
            bias = False

        if self.filter_size > 0:
            # Plain conv path: conv -> BN -> activation, no noise masks.
            self.noise = None
            self.layers = nn.Sequential(
                nn.Conv2d(in_channels,
                          out_channels,
                          kernel_size=filter_size,
                          padding=padding,
                          stride=stride,
                          bias=bias), nn.BatchNorm2d(out_channels), self.act)
        else:
            # Perturbation path: build the noise masks up front.
            noise_channels = in_channels if self.unique_masks else 1
            shape = (
                1, noise_channels, self.nmasks, input_size, input_size
            )  # can't dynamically reshape masks in forward if we want to train them
            self.noise = nn.Parameter(torch.Tensor(*shape),
                                      requires_grad=self.train_masks)
            if noise_type == "uniform":
                self.noise.data.uniform_(-1, 1)
            elif self.noise_type == 'normal':
                self.noise.data.normal_()
            else:
                # Unknown noise type: masks stay uninitialized (only warned).
                print('\n\nNoise type {} is not supported / understood\n\n'.
                      format(self.noise_type))

            if nmasks != 1:
                # NOTE(review): the message says "divisible by 3" but the
                # check is out_channels % in_channels — confirm intended wording.
                if out_channels % in_channels != 0:
                    print(
                        '\n\n\nnfilters must be divisible by 3 if using multiple noise masks per input channel\n\n\n'
                    )
                groups = in_channels  # grouped 1x1 conv: one group per input channel
            else:
                groups = 1

            self.layers = nn.Sequential(
                #self.act,      #TODO orig code uses ReLU here
                #nn.BatchNorm2d(out_channels), #TODO: orig code uses BN here
                nn.Conv2d(in_channels * self.nmasks,
                          out_channels,
                          kernel_size=1,
                          stride=1,
                          groups=groups),
                nn.BatchNorm2d(out_channels),
                self.act,
            )
            if self.mix_maps:
                # Optional extra 1x1 conv to mix the output feature maps.
                self.mix_layers = nn.Sequential(
                    nn.Conv2d(out_channels,
                              out_channels,
                              kernel_size=1,
                              stride=1,
                              groups=1),
                    nn.BatchNorm2d(out_channels),
                    self.act,
                )
    def __init__(self,
                 in_channels=None,
                 out_channels=None,
                 nmasks=None,
                 level=None,
                 filter_size=None,
                 debug=False,
                 use_act=False,
                 act=None,
                 stride=1,
                 unique_masks=False,
                 mix_maps=None,
                 train_masks=False,
                 noise_type='uniform',
                 input_size=None):
        """First-layer variant of the perturbation layer.

        Unlike PerturbLayer, the perturbation path here applies BN + activation
        BEFORE the 1x1 conv, and the activation is hard-coded to sigmoid.

        :param nmasks: number of perturbation masks per input channel
        :param level:  noise magnitude
        :param filter_size: if filter_size=0, layers=(perturb, conv_1x1) else layers=(conv_NxN), N=filter_size
        :param debug: debug mode or not
        :param use_act: whether to use activation immediately after perturbing input (set it to False for the first layer)
        :param act: accepted for interface compatibility but IGNORED — the
            activation is hard-coded to sigmoid below
        :param stride: stride
        :param unique_masks: same set or different sets of nmasks per input channel
        :param mix_maps: whether to apply second 1x1 convolution after perturbation, to mix output feature maps
        :param train_masks: whether to treat noise masks as regular trainable parameters of the model
        :param noise_type: normal or uniform
        :param input_size: input image resolution (28 for MNIST, 32 for CIFAR), needed to construct masks
        """
        super(PerturbLayerFirst, self).__init__()
        self.nmasks = nmasks
        self.unique_masks = unique_masks
        self.train_masks = train_masks
        self.level = level
        self.filter_size = filter_size
        self.use_act = use_act
        self.act = act_fn('sigmoid')  # NOTE(review): 'act' argument is ignored
        self.debug = debug
        self.noise_type = noise_type
        self.in_channels = in_channels
        self.input_size = input_size
        self.mix_maps = mix_maps

        # Conv geometry per filter size.
        # NOTE(review): other positive filter sizes leave padding/bias unbound
        # (NameError below) — confirm callers only pass 0, 1, 3, 5 or 7.
        if filter_size == 1:
            padding = 0
            bias = True
        elif filter_size == 3 or filter_size == 5:
            padding = 1
            bias = False
        elif filter_size == 7:
            stride = 2  # 7x7 filters also downsample
            padding = 3
            bias = False

        if self.filter_size > 0:
            # Plain conv path: conv -> BN -> activation, no noise masks.
            self.noise = None
            self.layers = nn.Sequential(
                nn.Conv2d(in_channels,
                          out_channels,
                          kernel_size=filter_size,
                          padding=padding,
                          stride=stride,
                          bias=bias), nn.BatchNorm2d(out_channels), self.act)

        else:  # layers=(perturb, conv_1x1)
            # One mask set per input channel when unique_masks, else shared.
            noise_channels = in_channels if self.unique_masks else 1
            shape = (1, noise_channels, self.nmasks, input_size, input_size)

            self.noise = nn.Parameter(torch.Tensor(*shape),
                                      requires_grad=self.train_masks)
            if noise_type == "uniform":
                self.noise.data.uniform_(-1, 1)
            elif self.noise_type == 'normal':
                self.noise.data.normal_()
            else:
                # Unknown noise type: masks stay uninitialized (only warned).
                print('\n\nNoise type {} is not supported / understood\n\n'.
                      format(self.noise_type))

            if nmasks != 1:
                # NOTE(review): the message says "divisible by 3" but the
                # check is out_channels % in_channels — confirm intended wording.
                if out_channels % in_channels != 0:
                    print(
                        '\n\n\nnfilters must be divisible by 3 if using multiple noise masks per input channel\n\n\n'
                    )
                groups = in_channels  # grouped 1x1 conv: one group per input channel
            else:
                groups = 1

            # BN + activation come BEFORE the 1x1 conv in this variant.
            self.layers = nn.Sequential(
                nn.BatchNorm2d(in_channels * self.nmasks),
                self.act,
                nn.Conv2d(in_channels * self.nmasks,
                          out_channels,
                          kernel_size=1,
                          stride=1,
                          groups=groups),
                nn.BatchNorm2d(out_channels),
                self.act,
            )
            if self.mix_maps:
                # Optional extra 1x1 conv to mix the output feature maps.
                self.mix_layers = nn.Sequential(
                    nn.Conv2d(out_channels,
                              out_channels,
                              kernel_size=1,
                              stride=1,
                              groups=1),
                    nn.BatchNorm2d(out_channels),
                    self.act,
                )
    def __init__(self,
                 in_channels=None,
                 out_channels=None,
                 nmasks=None,
                 level=None,
                 filter_size=None,
                 debug=False,
                 use_act=False,
                 stride=1,
                 act=None,
                 unique_masks=False,
                 mix_maps=None,
                 train_masks=False,
                 noise_type='uniform',
                 input_size=None):
        """Perturbation layer.

        With filter_size > 0 this is an ordinary conv -> BN -> activation
        block; with filter_size == 0 it applies additive noise masks followed
        by a (possibly grouped) 1x1 convolution, BN and activation.
        """
        super(PerturbLayer, self).__init__()
        # Record configuration (assignment order kept for state_dict layout).
        self.nmasks = nmasks
        self.unique_masks = unique_masks
        self.train_masks = train_masks
        self.level = level
        self.filter_size = filter_size
        self.use_act = use_act
        self.act = act_fn(act)
        self.debug = debug
        self.noise_type = noise_type
        self.in_channels = in_channels
        self.input_size = input_size
        self.mix_maps = mix_maps

        # Conv geometry for the supported filter sizes.
        if filter_size == 1:
            padding, bias = 0, True
        elif filter_size in (3, 5):
            padding, bias = 1, False
        elif filter_size == 7:
            stride, padding, bias = 2, 3, False

        if self.filter_size > 0:
            # Plain conv path — no noise masks used.
            self.noise = None
            self.layers = nn.Sequential(
                nn.Conv2d(in_channels, out_channels, kernel_size=filter_size,
                          padding=padding, stride=stride, bias=bias),
                nn.BatchNorm2d(out_channels),
                self.act)
        else:
            # Perturbation path: build the noise masks up front, because they
            # cannot be reshaped dynamically in forward() if they are trained.
            mask_channels = in_channels if self.unique_masks else 1
            mask_shape = (1, mask_channels, self.nmasks, input_size,
                          input_size)
            self.noise = nn.Parameter(torch.Tensor(*mask_shape),
                                      requires_grad=self.train_masks)
            if noise_type == "uniform":
                self.noise.data.uniform_(-1, 1)
            elif self.noise_type == 'normal':
                self.noise.data.normal_()
            else:
                print('\n\nNoise type {} is not supported / understood\n\n'.
                      format(self.noise_type))

            if nmasks != 1:
                # Multiple masks per channel: group the 1x1 conv per channel.
                if out_channels % in_channels != 0:
                    print(
                        '\n\n\nnfilters must be divisible by 3 if using multiple noise masks per input channel\n\n\n'
                    )
                groups = in_channels
            else:
                groups = 1

            self.layers = nn.Sequential(
                nn.Conv2d(in_channels * self.nmasks, out_channels,
                          kernel_size=1, stride=1, groups=groups),
                nn.BatchNorm2d(out_channels),
                self.act,
            )
            if self.mix_maps:
                # Optional second 1x1 conv mixing the output feature maps.
                self.mix_layers = nn.Sequential(
                    nn.Conv2d(out_channels, out_channels, kernel_size=1,
                              stride=1, groups=1),
                    nn.BatchNorm2d(out_channels),
                    self.act,
                )