Code example #1
def cifar_model_deep():
    model = nn.Sequential(nn.Conv2d(3, 8, 4, stride=2, padding=1), nn.ReLU(),
                          nn.Conv2d(8, 8, 3, stride=1, padding=1), nn.ReLU(),
                          nn.Conv2d(8, 8, 3, stride=1, padding=1), nn.ReLU(),
                          nn.Conv2d(8, 8, 4, stride=2, padding=1), nn.ReLU(),
                          Flatten(), nn.Linear(8 * 8 * 8, 100), nn.ReLU(),
                          nn.Linear(100, 10))
    return model
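These snippets use a Flatten() layer and the nn / math / torch names without showing their definitions. A minimal sketch of the imports and helper they appear to assume (the project most likely defines its own Flatten; recent PyTorch versions also ship nn.Flatten):

import math

import torch
import torch.nn as nn


class Flatten(nn.Module):
    """Collapse every dimension except the batch dimension."""

    def forward(self, x):
        return x.view(x.size(0), -1)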
Code example #2
def large_cifar_model():
    model = nn.Sequential(nn.Conv2d(3, 32, 3, stride=1, padding=1), nn.ReLU(),
                          nn.Conv2d(32, 32, 4, stride=2, padding=1), nn.ReLU(),
                          nn.Conv2d(32, 64, 3, stride=1, padding=1), nn.ReLU(),
                          nn.Conv2d(64, 64, 4, stride=2, padding=1), nn.ReLU(),
                          Flatten(), nn.Linear(64 * 8 * 8, 512), nn.ReLU(),
                          nn.Linear(512, 512), nn.ReLU(), nn.Linear(512, 10))
    return model
Code example #3
File: model.py  Project: rlacjfjin/GNN_branching
def cifar_model():
    model = nn.Sequential(nn.Conv2d(3, 16, 4, stride=2, padding=1), nn.ReLU(),
                          nn.Conv2d(16, 32, 4, stride=2, padding=1), nn.ReLU(),
                          Flatten(), nn.Linear(32 * 8 * 8, 100), nn.ReLU(),
                          nn.Linear(100, 10))
    for m in model.modules():
        if isinstance(m, nn.Conv2d):
            n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            m.weight.data.normal_(0, math.sqrt(2. / n))
            m.bias.data.zero_()
    return model
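The loop above is He (Kaiming) initialisation written by hand: each convolution's weights are drawn from N(0, 2 / fan_out) with fan_out = kernel_h * kernel_w * out_channels, and the biases are zeroed. For reference, a sketch of the same initialisation using torch.nn.init (not part of the project code):

def init_conv_he(model):
    # Equivalent to the manual loop: std = sqrt(2 / (k_h * k_w * out_channels)).
    for m in model.modules():
        if isinstance(m, nn.Conv2d):
            nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            nn.init.zeros_(m.bias)
    return model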
Code example #4
def mini_mnist_model():
    model = nn.Sequential(
        nn.Conv2d(1, 4, 2, stride=2, padding=1),
        nn.ReLU(),
        nn.Conv2d(4, 8, 2, stride=2),
        nn.ReLU(),
        Flatten(),
        nn.Linear(8 * 4 * 4, 50),
        nn.ReLU(),
        nn.Linear(50, 10),
    )
    return model
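The 8 * 4 * 4 input of the first Linear layer only matches 14 x 14 inputs, so a downscaled "mini" MNIST is presumably assumed here rather than the full 28 x 28 images: the two stride-2 convolutions take 14 -> 8 -> 4. A quick shape check under that assumption:

x = torch.zeros(1, 1, 14, 14)          # assumed mini-MNIST input size
print(mini_mnist_model()(x).shape)     # torch.Size([1, 10])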
Code example #5
File: model.py  Project: rlacjfjin/GNN_branching
def cifar_model_large():
    model = nn.Sequential(nn.Conv2d(3, 32, 3, stride=1, padding=1), nn.ReLU(),
                          nn.Conv2d(32, 32, 4, stride=2, padding=1), nn.ReLU(),
                          nn.Conv2d(32, 64, 3, stride=1, padding=1), nn.ReLU(),
                          nn.Conv2d(64, 64, 4, stride=2, padding=1), nn.ReLU(),
                          Flatten(), nn.Linear(64 * 8 * 8, 512), nn.ReLU(),
                          nn.Linear(512, 512), nn.ReLU(), nn.Linear(512, 10))
    # He-style initialisation for the convolutional layers.
    for m in model.modules():
        if isinstance(m, nn.Conv2d):
            n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            m.weight.data.normal_(0, math.sqrt(2. / n))
            m.bias.data.zero_()
    return model
Code example #6
def cifar_model():
    model = nn.Sequential(nn.Conv2d(3, 16, 4, stride=2, padding=1), nn.ReLU(),
                          nn.Conv2d(16, 32, 4, stride=2, padding=1), nn.ReLU(),
                          Flatten(), nn.Linear(32 * 8 * 8, 100), nn.ReLU(),
                          nn.Linear(100, 10))
    return model
Code example #7
def mini_mnist_model_m1():
    model = nn.Sequential(nn.Conv2d(1, 8, 2, stride=2, padding=1), nn.ReLU(),
                          nn.Conv2d(8, 16, 2, stride=2), nn.ReLU(), Flatten(),
                          nn.Linear(4 * 4 * 16, 50), nn.ReLU(),
                          nn.Linear(50, 10))
    return model
Code example #8
def mnist_model_m1():
    model = nn.Sequential(nn.Conv2d(1, 4, 4, stride=2, padding=1), nn.ReLU(),
                          nn.Conv2d(4, 8, 4, stride=2, padding=1), nn.ReLU(),
                          Flatten(), nn.Linear(8 * 7 * 7, 50), nn.ReLU(),
                          nn.Linear(50, 10))
    return model
Code example #9
def mnist_model():
    model = nn.Sequential(nn.Conv2d(1, 16, 4, stride=2, padding=1), nn.ReLU(),
                          nn.Conv2d(16, 32, 4, stride=2, padding=1), nn.ReLU(),
                          Flatten(), nn.Linear(32 * 7 * 7, 100), nn.ReLU(),
                          nn.Linear(100, 10))
    return model
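With kernel-4, stride-2, padding-1 convolutions each spatial dimension is halved, so 28 x 28 MNIST images shrink to 14 x 14 and then 7 x 7, which is where the 32 * 7 * 7 Linear input comes from (the CIFAR models above follow the same pattern, 32 -> 16 -> 8, hence their 8 x 8 feature maps). A quick check:

x = torch.zeros(1, 1, 28, 28)
feats = mnist_model()[:5](x)   # run everything up to and including Flatten
print(feats.shape)             # torch.Size([1, 1568]) == 32 * 7 * 7
print(mnist_model()(x).shape)  # torch.Size([1, 10])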
Code example #10
File: model.py  Project: rlacjfjin/GNN_branching
def load_adversarial_problem(filename, cls):
    if filename.endswith('mini.pth'):
        model = nn.Sequential(
            nn.Conv2d(1, 4, 2, stride=2, padding=1),
            nn.ReLU(),
            nn.Conv2d(4, 8, 2, stride=2),
            nn.ReLU(),
            Flatten(),
            nn.Linear(8 * 4 * 4, 50),
            nn.ReLU(),
            nn.Linear(50, 10),
        )
        model.load_state_dict(torch.load(filename)['state_dict'][0])
        no_grad(model)
        dataset = torch.load('./data/mini_mnist_test.pt')

    elif filename.endswith('small.pth'):
        model = nn.Sequential(nn.Conv2d(1, 16, 4, stride=2, padding=1),
                              nn.ReLU(),
                              nn.Conv2d(16, 32, 4, stride=2, padding=1),
                              nn.ReLU(), Flatten(), nn.Linear(32 * 7 * 7, 100),
                              nn.ReLU(), nn.Linear(100, 10))
        model.load_state_dict(torch.load(filename)['state_dict'][0])
        no_grad(model)
        # from torchvision import datasets, transforms
        # ds = datasets.MNIST('./data', train=True, download=True)
        # train_ds = {'data': ds.train_data.unsqueeze(1).float()/255.0,
        #             'labels': ds.train_labels}
        # torch.save(train_ds, './data/mnist_train.pt')
        # ds = datasets.MNIST('./data', train=False, download=True)
        # test_ds = {'data': ds.test_data.unsqueeze(1).float() / 255.0,
        #             'labels': ds.test_labels}
        # torch.save(test_ds, './data/mnist_test.pt')

        dataset = torch.load('./data/mnist_test.pt')
    else:
        raise NotImplementedError

    data = dataset['data']
    labels = dataset['labels']

    # Cast the sample to the current default tensor type (e.g. FloatTensor).
    sample = data[0].type(torch.Tensor().type())
    label = int(labels[0])
    adv_label = 0
    if label == adv_label:
        adv_label += 1
    eps = 0.1

    # Input domain for verification: per-pixel bounds [max(x - eps, 0), min(x + eps, 1)],
    # stacked along the last dimension.
    domain = torch.stack([
        torch.clamp(sample - eps, 0, None),
        torch.clamp(sample + eps, None, 1.0)
    ], -1)
    # Rewrite the final linear layer so the network is single-objective: its only
    # output is the logit margin between the true label and the adversarial label.
    layers = [lay for lay in model]
    assert isinstance(layers[-1], nn.Linear)
    old_last = layers[-1]
    new_last = nn.Linear(old_last.in_features, 1)
    no_grad(new_last)

    new_last.weight.copy_(old_last.weight[label] - old_last.weight[adv_label])
    new_last.bias.copy_(old_last.bias[label] - old_last.bias[adv_label])

    layers[-1] = new_last

    return cls(layers), domain
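The last-layer surgery above folds the two logits of interest into one: with w = W[label] - W[adv_label] and b = b[label] - b[adv_label], the single output equals logit(label) - logit(adv_label), so proving it stays positive over the domain shows the eps-ball contains no adversarial example for that target. A sketch of how the loader might be called; the wrapper class below is hypothetical and the path is a placeholder, since cls and no_grad are defined elsewhere in the project:

class SequentialNet(nn.Sequential):
    # Hypothetical stand-in for the project's network class, which is
    # constructed from a list of layers.
    def __init__(self, layers):
        super().__init__(*layers)


# The loader keys on the 'mini.pth' / 'small.pth' filename suffix to pick the
# architecture and test set; the path below is only illustrative.
net, domain = load_adversarial_problem('models/mnist_small.pth', SequentialNet)
print(domain.shape)  # torch.Size([1, 28, 28, 2]): per-pixel lower/upper bounds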