Example #1: cnn_2layer
    def __init__(self, device, dataset, input_channel, input_size, width,
                 linear_size):
        super(cnn_2layer, self).__init__()

        mean, sigma = get_mean_sigma(device, dataset, IBP=True)
        self.normalizer = Normalization(mean, sigma)

        self.layers = [
            Normalization(mean, sigma),
            Conv2d(input_channel,
                   4 * width,
                   4,
                   stride=2,
                   padding=1,
                   dim=input_size),
            ReLU((4 * width, input_size // 2, input_size // 2)),
            Conv2d(4 * width,
                   8 * width,
                   4,
                   stride=2,
                   padding=1,
                   dim=input_size // 2),
            ReLU((8 * width, input_size // 4, input_size // 4)),
            Flatten(),
            Linear(8 * width * (input_size // 4) * (input_size // 4),
                   linear_size),
            ReLU(linear_size),
            Linear(linear_size, 10),
        ]
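As a quick sanity check, the input size of the first Linear layer follows from the two stride-2 convolutions, each of which halves the spatial resolution. A minimal sketch with assumed example values (input_size=32 and width=2 are illustrative, not taken from the snippet):

# Illustrative check of the flattened feature size fed to the first Linear layer.
input_size, width = 32, 2                  # assumed example values
after_conv1 = input_size // 2              # first stride-2 conv: 32 -> 16
after_conv2 = after_conv1 // 2             # second stride-2 conv: 16 -> 8
flat_features = 8 * width * after_conv2 * after_conv2
print(flat_features)                       # 8 * 2 * 8 * 8 = 1024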
Example #2: ConvMedBig
    def __init__(self,
                 device,
                 dataset,
                 n_class=10,
                 input_size=32,
                 input_channel=3,
                 width1=1,
                 width2=1,
                 width3=1,
                 linear_size=100):
        super(ConvMedBig, self).__init__()

        mean, sigma = get_mean_sigma(device, dataset)
        self.normalizer = Normalization(mean, sigma)

        layers = [
            Normalization(mean, sigma),
            Conv2d(input_channel,
                   16 * width1,
                   3,
                   stride=1,
                   padding=1,
                   dim=input_size),
            ReLU((16 * width1, input_size, input_size)),
            Conv2d(16 * width1,
                   16 * width2,
                   4,
                   stride=2,
                   padding=1,
                   dim=input_size // 2),
            ReLU((16 * width2, input_size // 2, input_size // 2)),
            Conv2d(16 * width2,
                   32 * width3,
                   4,
                   stride=2,
                   padding=1,
                   dim=input_size // 2),
            ReLU((32 * width3, input_size // 4, input_size // 4)),
            Flatten(),
            Linear(32 * width3 * (input_size // 4) * (input_size // 4),
                   linear_size),
            ReLU(linear_size),
            Linear(linear_size, n_class),
        ]
        self.blocks = Sequential(*layers)
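A minimal usage sketch, assuming ConvMedBig, its layer helpers, and the 'cifar10' dataset key are all available from the surrounding codebase (argument values are illustrative):

import torch

# Hypothetical instantiation; not taken from the original project.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
net = ConvMedBig(device, 'cifar10', n_class=10, input_size=32,
                 input_channel=3, width1=1, width2=1, width3=1, linear_size=100)
# net.blocks stacks Normalization -> three Conv2d/ReLU stages -> Flatten ->
# Linear -> ReLU -> Linear, so a (B, 3, 32, 32) batch should map to (B, 10) logits.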
Example #3: UpscaleNet
    def __init__(self, device, dataset, adv_pre, input_size, net, net_dim):
        super(UpscaleNet, self).__init__()
        self.net = net
        self.net_dim = net_dim
        self.blocks = []

        if input_size == net_dim:
            self.transform = None
        else:
            self.transform = Upsample(size=self.net_dim, mode="nearest",
                                      align_corners=False, consolidate_errors=False)
            self.blocks += [self.transform]

        if adv_pre:
            self.blocks += [Scale(2, fixed=True), Bias(-1, fixed=True)]
            self.normalization = Sequential(*self.blocks)
        else:
            mean, sigma = get_mean_sigma(device, dataset)
            self.normalization = Normalization(mean, sigma)
            self.blocks += [self.normalization]

        self.blocks += [self.net]
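When adv_pre is set, Scale(2, fixed=True) followed by Bias(-1, fixed=True) replaces dataset normalization with a fixed affine map. Assuming Scale multiplies by its factor and Bias adds its offset, as the names suggest, the effect on inputs in [0, 1] is:

import torch

x = torch.tensor([0.0, 0.5, 1.0])
y = 2 * x - 1    # assumed effect of Scale(2) followed by Bias(-1)
print(y)         # tensor([-1., 0., 1.]), i.e. [0, 1] inputs are rescaled to [-1, 1]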
Example #4: FFNN
    def __init__(self,
                 device,
                 dataset,
                 sizes,
                 n_class=10,
                 input_size=32,
                 input_channel=3):
        super(FFNN, self).__init__()

        mean, sigma = get_mean_sigma(device, dataset)
        self.normalizer = Normalization(mean, sigma)

        layers = [
            Flatten(),
            Linear(input_size * input_size * input_channel, sizes[0]),
            ReLU(sizes[0])
        ]
        for i in range(1, len(sizes)):
            layers += [
                Linear(sizes[i - 1], sizes[i]),
                ReLU(sizes[i]),
            ]
        layers += [Linear(sizes[-1], n_class)]
        self.blocks = Sequential(*layers)
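A minimal usage sketch, assuming FFNN and its helpers are importable from the surrounding codebase (the dataset key and sizes are illustrative):

import torch

device = torch.device('cpu')
# sizes=[100, 100] builds: Flatten -> Linear(3*32*32, 100) -> ReLU ->
#                          Linear(100, 100) -> ReLU -> Linear(100, 10)
net = FFNN(device, 'cifar10', sizes=[100, 100], n_class=10,
           input_size=32, input_channel=3)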
Example #5: cnn_IBP_large
    def __init__(self, device, dataset, input_channel, input_size,
                 linear_size):
        super(cnn_IBP_large, self).__init__()

        mean, sigma = get_mean_sigma(device, dataset, IBP=True)
        self.normalizer = Normalization(mean, sigma)

        self.layers = [
            Normalization(mean, sigma),
            Conv2d(input_channel, 64, 3, stride=1, padding=1, dim=input_size),
            ReLU((64, input_size, input_size)),
            Conv2d(64, 64, 3, stride=1, padding=1, dim=input_size),
            ReLU((64, input_size, input_size)),
            Conv2d(64, 128, 3, stride=2, padding=1, dim=input_size // 2),
            ReLU((128, input_size // 2, input_size // 2)),
            Conv2d(128, 128, 3, stride=1, padding=1, dim=input_size // 2),
            ReLU((128, input_size // 2, input_size // 2)),
            Conv2d(128, 128, 3, stride=1, padding=1, dim=input_size // 2),
            ReLU((128, input_size // 2, input_size // 2)),
            Flatten(),
            Linear(128 * (input_size // 2) * (input_size // 2), linear_size),
            ReLU(linear_size),
            Linear(linear_size, 10),
        ]
Example #6: MyResnet
    def __init__(self, device, dataset, n_blocks, n_class=10, input_size=32, input_channel=3, block='basic',
                 in_planes=32, net_dim=None, widen_factor=1, pooling="global"):
        super(MyResnet, self).__init__(net_dim=None if net_dim == input_size else net_dim)
        if block == 'basic':
            self.res_block = BasicBlock
        elif block == 'preact':
            self.res_block = PreActBlock
        elif block == 'wide':
            self.res_block = WideBlock
        elif block == 'fixup':
            self.res_block = FixupBasicBlock
        else:
            assert False
        self.n_layers = sum(n_blocks)
        mean, sigma = get_mean_sigma(device, dataset)
        dim = input_size

        k = widen_factor

        layers = [Normalization(mean, sigma),
                  Conv2d(input_channel, in_planes, kernel_size=3, stride=1, padding=1, bias=(block == "wide"), dim=dim)]

        if not block == "wide":
            layers += [Bias() if block == 'fixup' else BatchNorm2d(in_planes),
                       ReLU((in_planes, input_size, input_size))]

        strides = [1, 2] + ([2] if len(n_blocks) > 2 else []) + [1] * max(0,(len(n_blocks)-3))

        n_filters = in_planes
        for n_block, n_stride in zip(n_blocks, strides):
            if n_stride > 1:
                n_filters *= 2
            dim, block_layers = self.get_block(in_planes, n_filters*k, n_block, n_stride, dim=dim)
            in_planes = n_filters*k
            layers += [block_layers]

        if block == 'fixup':
            layers += [Bias()]
        else:
            layers += [BatchNorm2d(n_filters*k)]

        if block == "wide":
            layers += [ReLU((n_filters*k, dim, dim))]

        if pooling == "global":
            layers += [GlobalAvgPool2d()]
            N = n_filters * k
        elif pooling == "None":      # old networks lack the pooling layer and won't load otherwise
            N = n_filters * dim * dim * k
        elif isinstance(pooling, int):
            layers += [AvgPool2d(pooling)]
            dim = dim//pooling
            N = n_filters * dim * dim * k

        layers += [Flatten(), ReLU(N)]

        if block == 'fixup':
            layers += [Bias()]

        layers += [Linear(N, n_class)]

        self.blocks = Sequential(*layers)

        # Fixup initialization
        if block == 'fixup':
            for m in self.modules():
                if isinstance(m, FixupBasicBlock):
                    conv1, conv2 = m.residual[1].conv, m.residual[5].conv
                    nn.init.normal_(conv1.weight,
                                    mean=0,
                                    std=np.sqrt(2 / (conv1.weight.shape[0] * np.prod(conv1.weight.shape[2:]))) * self.n_layers ** (-0.5))
                    nn.init.constant_(conv2.weight, 0)
                elif isinstance(m, nn.Linear):
                    nn.init.constant_(m.weight, 0)
                    nn.init.constant_(m.bias, 0)
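For reference, the strides expression assigns stride 1 to the first stage, stride 2 to the next one or two stages, and stride 1 to any remaining stages. A small sketch of the values it produces (the helper name is illustrative):

def resnet_strides(n_blocks):
    # Mirrors the strides expression used in MyResnet above.
    return [1, 2] + ([2] if len(n_blocks) > 2 else []) + [1] * max(0, len(n_blocks) - 3)

print(resnet_strides([2, 2]))        # [1, 2]
print(resnet_strides([2, 2, 2]))     # [1, 2, 2]
print(resnet_strides([3, 4, 6, 3]))  # [1, 2, 2, 1]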
Example #7: myNet
    def __init__(self, device, dataset, n_class=10, input_size=32, input_channel=3, conv_widths=None,
                 kernel_sizes=None, linear_sizes=None, depth_conv=None, paddings=None, strides=None,
                 dilations=None, pool=False, net_dim=None, bn=False, max=False, scale_width=True):
        super(myNet, self).__init__(net_dim=None if net_dim == input_size else net_dim)
        if kernel_sizes is None:
            kernel_sizes = [3]
        if conv_widths is None:
            conv_widths = [2]
        if linear_sizes is None:
            linear_sizes = [200]
        if paddings is None:
            paddings = [1]
        if strides is None:
            strides = [2]
        if dilations is None:
            dilations = [1]
        if net_dim is None:
            net_dim = input_size

        if len(conv_widths) != len(kernel_sizes):
            kernel_sizes = len(conv_widths) * [kernel_sizes[0]]
        if len(conv_widths) != len(paddings):
            paddings = len(conv_widths) * [paddings[0]]
        if len(conv_widths) != len(strides):
            strides = len(conv_widths) * [strides[0]]
        if len(conv_widths) != len(dilations):
            dilations = len(conv_widths) * [dilations[0]]

        self.n_class = n_class
        self.input_size = input_size
        self.input_channel = input_channel
        self.conv_widths = conv_widths
        self.kernel_sizes = kernel_sizes
        self.paddings = paddings
        self.strides = strides
        self.dilations = dilations
        self.linear_sizes = linear_sizes
        self.depth_conv = depth_conv
        self.net_dim = net_dim
        self.bn = bn
        self.max = max

        mean, sigma = get_mean_sigma(device, dataset)
        layers = self.blocks
        layers += [Normalization(mean, sigma)]

        N = net_dim
        n_channels = input_channel
        self.dims += [(n_channels, N, N)]

        for width, kernel_size, padding, stride, dilation in zip(conv_widths, kernel_sizes, paddings, strides, dilations):
            if scale_width:
                width *= 16
            N = int(np.floor((N + 2 * padding - dilation * (kernel_size - 1) - 1) / stride + 1))
            layers += [Conv2d(n_channels, int(width), kernel_size, stride=stride, padding=padding, dilation=dilation)]
            if self.bn:
                layers += [BatchNorm2d(int(width))]
            if self.max:
                layers += [MaxPool2d(int(width))]
            layers += [ReLU((int(width), N, N))]
            n_channels = int(width)
            self.dims += 2 * [(n_channels, N, N)]

        if depth_conv is not None:
            layers += [Conv2d(n_channels, depth_conv, 1, stride=1, padding=0),
                       ReLU((n_channels, N, N))]
            n_channels = depth_conv
            self.dims += 2 * [(n_channels, N, N)]

        if pool:
            layers += [GlobalAvgPool2d()]
            self.dims += 2 * [(n_channels, 1, 1)]
            N = 1

        layers += [Flatten()]
        N = n_channels * N ** 2
        self.dims += [(N,)]

        for width in linear_sizes:
            if width == 0:
                continue
            layers += [Linear(int(N), int(width)),
                       ReLU(width)]
            N = width
            self.dims += 2 * [(N,)]

        layers += [Linear(N, n_class)]
        self.dims += [(n_class,)]

        self.blocks = Sequential(*layers)
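The spatial-size update inside the convolution loop is the standard convolution output-size formula N_out = floor((N + 2*padding - dilation*(kernel_size - 1) - 1) / stride + 1). A worked example with illustrative values:

import numpy as np

def conv_out_size(N, kernel_size, padding, stride, dilation=1):
    # Same output-size formula as used in the convolution loop above.
    return int(np.floor((N + 2 * padding - dilation * (kernel_size - 1) - 1) / stride + 1))

print(conv_out_size(32, kernel_size=3, padding=1, stride=2))  # 16
print(conv_out_size(16, kernel_size=3, padding=1, stride=2))  # 8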