Example no. 1
import os

import cv2
import numpy as np
import torch
import torch.nn.functional as F
from torchvision import transforms

import model  # project-local module defining NLayerDiscriminator
import utils  # project-local module defining get_testloader


def create_cam(config):
    if not os.path.exists(config.result_path):
        os.mkdir(config.result_path)

    test_loader, num_class = utils.get_testloader(config.dataset,
                                                  config.dataset_path,
                                                  config.img_size)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    mdl = model.NLayerDiscriminator()
    #mdl = nn.DataParallel(mdl)
    cnn = mdl.to(device)
    state_dict = torch.load(os.path.join(config.model_path, config.model_name))
    cnn.load_state_dict(state_dict)
    finalconv_name = 'conv'

    # hook
    feature_blobs = []

    def hook_feature(module, input, output):
        feature_blobs.append(output.cpu().data.numpy())

    cnn._modules.get(finalconv_name).register_forward_hook(hook_feature)
    params = list(cnn.parameters())
    # take only the weights of the final linear layer (params[-1] is its bias)
    weight_softmax = np.squeeze(params[-2].cpu().data.numpy())

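    # Class activation map: CAM_c(x, y) = sum_k w_k^c * f_k(x, y), i.e. the
    # class weights of the final linear layer re-weight the feature maps of
    # the last conv layer captured by the hook above.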
    def returnCAM(feature_conv, weight_softmax, class_idx):
        size_upsample = (config.img_size, config.img_size)
        _, nc, h, w = feature_conv.shape
        output_cam = []
        cam = weight_softmax[class_idx].dot(feature_conv.reshape((nc, h * w)))
        cam = cam.reshape(h, w)
        cam = cam - np.min(cam)
        cam_img = cam / np.max(cam)
        cam_img = np.uint8(255 * cam_img)
        output_cam.append(cv2.resize(cam_img, size_upsample))
        return output_cam

    for i, (image_tensor, label) in enumerate(test_loader):
        image_PIL = transforms.ToPILImage()(image_tensor[0])
        image_PIL.save(os.path.join(config.result_path, 'img%d.png' % (i + 1)))

        image_tensor = image_tensor.to(device)
        logit, _ = cnn(image_tensor)
        h_x = F.softmax(logit, dim=1).data.squeeze()
        probs, idx = h_x.sort(0, True)
        print("True label : %d, Predicted label : %d, Probability : %.2f" %
              (label.item(), idx[0].item(), probs[0].item()))
        CAMs = returnCAM(feature_blobs[0], weight_softmax, [idx[0].item()])
        img = cv2.imread(
            os.path.join(config.result_path, 'img%d.png' % (i + 1)))
        height, width, _ = img.shape
        heatmap = cv2.applyColorMap(cv2.resize(CAMs[0], (width, height)),
                                    cv2.COLORMAP_JET)
        result = heatmap * 0.3 + img * 0.5
        cv2.imwrite(os.path.join(config.result_path, 'cam%d.png' % (i + 1)),
                    result)
        if i + 1 == config.num_result:
            break
        feature_blobs.clear()
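
A minimal way to drive create_cam, as a sketch: the attribute names below are exactly the ones the function reads, but every value is hypothetical.

from types import SimpleNamespace

# Hypothetical values; only the attribute names come from create_cam above.
config = SimpleNamespace(
    result_path='./cam_results',
    dataset='mnist',              # whatever utils.get_testloader accepts
    dataset_path='./data',
    img_size=64,
    model_path='./ckpts',
    model_name='discriminator.pth',
    num_result=5,
)
create_cam(config)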
Example no. 2

print(args.__dict__)
print('+' * 80)

init_path = '{}/{}/{}_{}.init'.format(ckpt_path, 'init', args.dataset,
                                      args.clf)
best_path = os.path.join(ckpt_path, folder, 'models', model_name + '.best')
stop_path = os.path.join(ckpt_path, folder, 'models', model_name + '.stop')

if args.batch_size == 0:
    args.batch_size = args.num_train
    print("Resetting batch size: {}...".format(args.batch_size))

train_loader = get_trainloader(args.dataset, args.batch_size, False)
test_loader = get_testloader(args.dataset,
                             args.test_batch_size,
                             noise=args.noise)
print('+' * 80)

# Fire the engines
if args.clf == 'fcn':
    print('Initializing FCN...')
    model = FCN(args.input_size, args.output_size)
elif args.clf == 'svm':
    print('Initializing SVM...')
    model = SVM(args.input_size, args.output_size)
elif args.clf == 'resnet18':
    print('Initializing ResNet18...')
    model = resnet.resnet18(num_channels=args.num_channels,
                            num_classes=args.output_size)
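
The weights saved at init_path can presumably be restored with a plain state_dict load; a sketch, assuming the .init file was written with torch.save(model.state_dict(), init_path):

state_dict = torch.load(init_path)   # assumption: init_path stores a state_dict
model.load_state_dict(state_dict)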
Example no. 3
save_dir = 'checkpoint.pth'

device = torch.device('cuda' if args.gpu else 'cpu')

if args.arch:
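    # note: any truthy args.arch selects 'resnet50'; the arch string itself is not forwarded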
    model = build_network(
        'resnet50',
        hidden_layer1_units=args.hidden_units if args.hidden_units else 512)
else:
    model = build_network(
        hidden_layer1_units=args.hidden_units if args.hidden_units else 512)

# Load the datasets with ImageFolder
trainloader, train_dataset = get_trainloader()
valloader, _ = get_valloader()
testloader, _ = get_testloader()

# Using the image datasets and the transforms, define the dataloaders
# trainloader = torch.utils.data.DataLoader(train_dataset, batch_size=64, shuffle=True)
# valloader = torch.utils.data.DataLoader(val_dataset, batch_size=64, shuffle=True)
# testloader = torch.utils.data.DataLoader(test_dataset, batch_size=64, shuffle=True)


def train(epochs=3, lr=0.003, print_scores_in=40, gpu=True):

    # loss criterion
    criterion = nn.NLLLoss()
    # optimizer: only the classifier parameters, since the feature extractor is frozen
    optimizer = optim.Adam(model.classifier.parameters(), lr=lr)
    # move the model to the chosen device
    model.to(device)
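
The optimizer above touches only model.classifier, matching the comment that the feature parameters are frozen. If build_network does not freeze them itself, a sketch of doing so (assuming a torchvision-style model exposing a classifier attribute):

for name, param in model.named_parameters():
    if not name.startswith('classifier'):
        param.requires_grad = False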
Example no. 4
import os
import pickle
import random

import numpy as np
import torch
import torch.nn as nn
from advertorch.attacks import LinfPGDAttack
from tqdm import tqdm

# Assumed project-local: utils (get_models, get_testloader) and get_args.


def main():
    # get args
    args = get_args()

    # set up gpus
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    assert torch.cuda.is_available()

    # load models
    if 'gal' in args.model_file:
        leaky_relu = True
    else:
        leaky_relu = False
    ensemble = utils.get_models(args, train=False, as_ensemble=True, model_file=args.model_file, leaky_relu=leaky_relu)
    models = ensemble.models

    train_seed = args.model_file.split('/')[-3]
    train_alg = args.model_file.split('/')[-4]

    # get data loaders
    testloader = utils.get_testloader(args, batch_size=args.batch_size)
    
    # pick out samples that are correctly classified by all submodels
    correct = []
    for m in models:
        correct_m = []
        for (x, y) in testloader:
            x, y = x.cuda(), y.cuda()

            outputs = m(x)
            _, pred = outputs.max(1)
            correct_m.append(pred.eq(y))
        correct_m = torch.cat(correct_m)
        correct.append(correct_m)
    correct = torch.stack(correct, dim=-1).all(-1)
    correct_idx = correct.nonzero().squeeze(-1)

    random.seed(0)
    subset_idx = correct_idx[random.sample(range(correct_idx.size(0)), args.subset_num)].cpu()
    subset_loader = utils.get_testloader(args, batch_size=args.batch_size, shuffle=False, subset_idx=subset_idx)

    # PGD
    eps_list = [0.03]
    random_start = args.random_start
    steps = args.steps

    rob = {}
    rob['random_start'] = args.random_start
    rob['steps'] = args.steps
    
    for eps in tqdm(eps_list, desc='PGD eps', leave=False, position=0):
        correct_or_not_rs = torch.zeros((len(models), len(models)+1, args.subset_num, random_start), dtype=torch.bool)
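        # Layout (as used by the indexing below): dim 0 = model whose gradients
        # craft the attack, dim 1 = model being evaluated (index len(models) is
        # the full ensemble), dim 2 = test sample, dim 3 = random restart.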

        for rs in tqdm(range(random_start), desc='Random Start', leave=False, position=1):
            torch.manual_seed(rs)
            test_iter = tqdm(subset_loader, desc='Batch', leave=False, position=2)

            total = 0
            for (x, y) in test_iter:
                x, y = x.cuda(), y.cuda()

                adv_list = []
                for i, m in enumerate(models):
                    adversary = LinfPGDAttack(
                        m, loss_fn=nn.CrossEntropyLoss(reduction="sum"), eps=eps,
                        nb_iter=steps, eps_iter=eps/5, rand_init=True, clip_min=0., clip_max=1.,
                        targeted=False)
            
                    adv = adversary.perturb(x, y)
                    adv_list.append(adv)

                for i, adv in enumerate(adv_list):
                    for j, m in enumerate(models):
                        if j == i:
                            outputs = m(x)
                            _, pred = outputs.max(1)
                            assert pred.eq(y).all()

                        outputs = m(adv)
                        _, pred = outputs.max(1)

                        correct_or_not_rs[i, j, total:total+x.size(0), rs] = pred.eq(y)
                
                    outputs = ensemble(adv)
                    _, pred = outputs.max(1)
                    correct_or_not_rs[i, len(models), total:total+x.size(0), rs] = pred.eq(y)
                
                total += x.size(0)

        correct_or_not_rs = torch.all(correct_or_not_rs, dim=-1)
        asr = np.zeros((len(models), len(models)+1))

        tqdm.write("eps: {:.2f}".format(eps))

        for i in range(len(models)):
            message = ''
            for j in range(len(models)+1):
                rate = 1 - correct_or_not_rs[i, j, :].sum().item() / args.subset_num
                message += '\t{}: {:.2%}'.format(j, rate)
                asr[i, j] = rate
            tqdm.write(message)
        
        rob[str(eps)] = asr
    
    # save to file
    if args.save_to_file:
        output_root = os.path.join('results', 'transferability', train_alg, train_seed)
        if not os.path.exists(output_root):
            os.makedirs(output_root)
        output_filename = args.model_file.split('/')[-2]
        output = os.path.join(output_root, '.'.join((output_filename, 'pkl')))

        with open(output, 'wb') as f:
            pickle.dump(rob, f, pickle.HIGHEST_PROTOCOL)
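
Reading the result back is a one-liner; a sketch with a hypothetical path (in practice, the output path assembled in main() above):

import pickle

with open('results/transferability/alg/seed/run.pkl', 'rb') as f:  # hypothetical path
    rob = pickle.load(f)
asr = rob['0.03']  # eps_list above holds only 0.03
# asr[i, j] is the rate at which examples crafted on model i fool model j;
# column len(models) stands for the full ensemble.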
Example no. 5
    std_out = sys.stdout
    sys.stdout = log_file
    print('+' * 80)
    print(model_name)
    print('+' * 80)

    init_path = '../init/{}_svm.init'.format(dataset)
    best_path = os.path.join(ckpt_path, model_name + '.best')
    stop_path = os.path.join(ckpt_path, model_name + '.stop')

    # prepare graph and data
    fog_graph, workers = get_fog_graph(hook, args.num_workers,
                                       args.num_clusters, args.shuffle_workers,
                                       args.uniform_clusters)

    test_loader = get_testloader(args)

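    # non_iid == 10 apparently denotes the iid setting (note the '_iid_' filename below)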
    if non_iid == 10:
        data_file = '../ckpts/data_{}_iid_num_workers_{}' \
                    '_stratify_True_uniform_True_repeat_{}.pkl'.format(
                        dataset, args.num_workers, args.repeat)
    else:
        data_file = '../ckpts/data_{}_non_iid_{}_num_workers_{}' \
                    '_stratify_True_uniform_True_repeat_{}.pkl'.format(
                        dataset, non_iid, args.num_workers, args.repeat)
    print('Loading data: {}'.format(data_file))
    with open(data_file, 'rb') as f:
        X_trains, y_trains = pkl.load(f)
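    # Presumably one array per worker here (num_workers entries), produced by
    # the stratified split that data_file's name describes.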

    print(fog_graph)

    best = 0
Example no. 6
import os
import random

import pandas as pd
import torch
import torch.nn as nn
from advertorch.attacks import GradientSignAttack, LinfPGDAttack
from tqdm import tqdm

# Assumed project-local: utils, get_args, attack_whole_dataset, CarliniWagnerLoss.


def main():
    # get args
    args = get_args()

    # set up gpus
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    assert torch.cuda.is_available()

    # load models
    if 'gal' in args.model_file:
        leaky_relu = True
    else:
        leaky_relu = False
    ensemble = utils.get_models(args, train=False, as_ensemble=True, model_file=args.model_file, leaky_relu=leaky_relu)

    # get data loaders
    total_sample_num = 10000
    if args.subset_num:
        random.seed(0)
        subset_idx = random.sample(range(total_sample_num), args.subset_num)
        testloader = utils.get_testloader(args, batch_size=200, shuffle=False, subset_idx=subset_idx)
    else:
        testloader = utils.get_testloader(args, batch_size=200, shuffle=False)

    loss_fn = nn.CrossEntropyLoss() if args.loss_fn == 'xent' else CarliniWagnerLoss(conf=args.cw_conf)
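    # Assumption: CarliniWagnerLoss is the margin-based Carlini & Wagner
    # objective, with cw_conf as its confidence margin; 'xent' means plain
    # cross-entropy.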

    rob = {}
    rob['sample_num'] = args.subset_num if args.subset_num else total_sample_num
    rob['loss_fn'] = 'xent' if args.loss_fn == 'xent' else 'cw_{:.1f}'.format(args.cw_conf)

    train_seed = args.model_file.split('/')[-3]
    train_alg = args.model_file.split('/')[-4]

    if args.convergence_check:
        eps = 0.01
        steps_list = [50, 500, 1000]
        random_start = 1

        rob['random_start'] = random_start
        rob['eps'] = eps

        # FGSM
        test_iter = tqdm(testloader, desc='FGSM', leave=False, position=0)
        adversary = GradientSignAttack(
            ensemble, loss_fn=nn.CrossEntropyLoss(), eps=eps, 
            clip_min=0., clip_max=1., targeted=False)
        _, label, pred, advpred = attack_whole_dataset(adversary, test_iter, device="cuda")
        print("Accuracy: {:.2f}%, FGSM Accuracy: {:.2f}%".format(
            100. * (label == pred).sum().item() / len(label),
            100. * (label == advpred).sum().item() / len(label)))
        rob['clean'] = 100. * (label == pred).sum().item() / len(label)
        rob['fgsm'] = 100. * (label == advpred).sum().item() / len(label)
        
        for steps in tqdm(steps_list, desc='PGD steps', leave=False, position=0):
            correct_or_not = []

            for i in tqdm(range(random_start), desc='Random Start', leave=False, position=1):
                torch.manual_seed(i)
                test_iter = tqdm(testloader, desc='Batch', leave=False, position=2)

                adversary = LinfPGDAttack(
                    ensemble, loss_fn=loss_fn, eps=eps,
                    nb_iter=steps, eps_iter=eps/5, rand_init=True, clip_min=0., clip_max=1.,
                    targeted=False)
                
                _, label, pred, advpred = attack_whole_dataset(adversary, test_iter, device="cuda") 
                correct_or_not.append(label == advpred)
            
            correct_or_not = torch.stack(correct_or_not, dim=-1).all(dim=-1)
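            # A sample counts as robust only if every random restart fails to
            # flip it; .all(dim=-1) implements that worst-case convention.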

            tqdm.write("Accuracy: {:.2f}%, steps: {:d}, PGD Accuracy: {:.2f}%".format(
                100. * (label == pred).sum().item() / len(label),
                steps,
                100. * correct_or_not.sum().item() / len(label)))
            
            rob[str(steps)] = 100. * correct_or_not.sum().item() / len(label)
        
        # save to file
        if args.save_to_csv:
            output_root = os.path.join('results', 'wbox', train_alg, train_seed, 'convergence_check')
            if not os.path.exists(output_root):
                os.makedirs(output_root)
            output_filename = args.model_file.split('/')[-2]
            output = os.path.join(output_root, '.'.join((output_filename, 'csv')))

            df = pd.DataFrame(rob, index=[0])
            if args.append_out and os.path.isfile(output):
                with open(output, 'a') as f:
                    f.write('\n')
                df.to_csv(output, sep=',', mode='a', header=False, index=False, float_format='%.2f')
            else:
                df.to_csv(output, sep=',', index=False, float_format='%.2f')
    else:
        eps_list = [0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07]

        rob['random_start'] = args.random_start
        rob['steps'] = args.steps
        
        for eps in tqdm(eps_list, desc='PGD eps', leave=True, position=0):            
            correct_or_not = []

            for i in tqdm(range(args.random_start), desc='Random Start', leave=False, position=1):
                torch.manual_seed(i)
                test_iter = tqdm(testloader, desc='Batch', leave=False, position=2)

                adversary = LinfPGDAttack(
                    ensemble, loss_fn=loss_fn, eps=eps,
                    nb_iter=args.steps, eps_iter=eps/5, rand_init=True, clip_min=0., clip_max=1.,
                    targeted=False)
                
                _, label, pred, advpred = attack_whole_dataset(adversary, test_iter, device="cuda")

                correct_or_not.append(label == advpred)
            
            correct_or_not = torch.stack(correct_or_not, dim=-1).all(dim=-1)

            tqdm.write("Accuracy: {:.2f}%, eps: {:.2f}, PGD Accuracy: {:.2f}%".format(
                100. * (label == pred).sum().item() / len(label),
                eps,
                100. * correct_or_not.sum().item() / len(label)))
            
            rob['clean'] = 100. * (label == pred).sum().item() / len(label)
            rob[str(eps)] = 100. * correct_or_not.sum().item() / len(label)
        
        # save to file
        if args.save_to_csv:
            output_root = os.path.join('results', 'wbox', train_alg, train_seed)
            if not os.path.exists(output_root):
                os.makedirs(output_root)
            output_filename = args.model_file.split('/')[-2]
            output = os.path.join(output_root, '.'.join((output_filename, 'csv')))

            df = pd.DataFrame(rob, index=[0])
            if args.append_out and os.path.isfile(output):
                with open(output, 'a') as f:
                    f.write('\n')
                df.to_csv(output, sep=',', mode='a', header=False, index=False, float_format='%.2f')
            else:
                df.to_csv(output, sep=',', index=False, float_format='%.2f')
Example no. 7
import os
import pickle
import random

import torch
import torch.nn as nn
from tqdm import tqdm

# Assumed project-local: utils (get_models, get_testloader, DistillationLoader),
# get_args, and Linf_distillation.


def main():
    # get args
    args = get_args()

    # set up gpus
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    assert torch.cuda.is_available()

    # load models
    if 'gal' in args.model_file:
        leaky_relu = True
    else:
        leaky_relu = False
    ensemble = utils.get_models(args,
                                train=False,
                                as_ensemble=True,
                                model_file=args.model_file,
                                leaky_relu=leaky_relu)
    models = ensemble.models

    train_seed = args.model_file.split('/')[-3]
    train_alg = args.model_file.split('/')[-4]

    # get data loaders
    testloader = utils.get_testloader(args, batch_size=100)

    # pick out samples that are correctly classified by all submodels
    correct = []
    for m in models:
        correct_m = []
        for (x, y) in testloader:
            x, y = x.cuda(), y.cuda()

            outputs = m(x)

            _, pred = outputs.max(1)
            correct_m.append(pred.eq(y))
        correct_m = torch.cat(correct_m)
        correct.append(correct_m)
    correct = torch.stack(correct, dim=-1).all(-1)
    correct_idx = correct.nonzero().squeeze(-1)

    random.seed(0)
    subset_idx = correct_idx[random.sample(range(correct_idx.size(0)),
                                           args.subset_num)].cpu()
    # use a very small batch size so that we can sample different layers multiple times
    subset_loader = utils.get_testloader(args,
                                         batch_size=10,
                                         shuffle=True,
                                         subset_idx=subset_idx)

    eps_list = [0.07]
    steps = 10

    rob = {}
    rob['steps'] = steps

    criterion = nn.CrossEntropyLoss(reduction='none')
    for eps in tqdm(eps_list, desc='eps', leave=False, position=0):
        losses = torch.zeros((len(models), len(models), args.subset_num))

        torch.manual_seed(0)
        loader = utils.DistillationLoader(subset_loader, subset_loader)
        test_iter = tqdm(loader, desc='Batch', leave=False, position=1)

        random.seed(0)
        total = 0
        for batch_idx, (si, sl, ti, tl) in enumerate(test_iter):
            si, sl = si.cuda(), sl.cuda()
            ti, tl = ti.cuda(), tl.cuda()

            layer = random.randint(1, args.depth)
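            # A random intermediate layer is drawn per batch; Linf_distillation
            # below then perturbs si so that each model's features at that layer
            # mimic ti's (a feature-distillation attack, DVERGE-style as far as
            # the surrounding code suggests).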

            adv_list = []
            for i, m in enumerate(models):
                adv = Linf_distillation(m,
                                        si,
                                        ti,
                                        eps,
                                        eps / steps,
                                        steps,
                                        layer,
                                        before_relu=True,
                                        mu=1,
                                        momentum=True,
                                        rand_start=False)
                adv_list.append(adv)

            with torch.no_grad():
                for i, adv in enumerate(adv_list):
                    for j, m in enumerate(models):
                        if j == i:
                            outputs = m(si)
                            _, pred = outputs.max(1)
                            assert pred.eq(sl).all()

                        outputs = m(adv)
                        loss = criterion(outputs, tl)

                        losses[i, j, total:total + si.size(0)] = loss

            total += si.size(0)

        losses_np = torch.mean(losses, dim=-1).numpy()
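        # losses_np[i, j]: mean loss of model j on inputs distilled from model
        # i's features; larger off-diagonal values suggest the submodels rely
        # on more diverse features (hence the 'diversity' results directory).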

        tqdm.write("eps: {:.2f}".format(eps))

        for i in range(len(models)):
            message = ''
            for j in range(len(models)):
                message += '\t{}: {:.3f}'.format(j, losses_np[i, j])
            tqdm.write(message)

        rob[str(eps)] = losses_np

    # save to file
    if args.save_to_file:
        output_root = os.path.join('results', 'diversity', train_alg,
                                   train_seed)
        if not os.path.exists(output_root):
            os.makedirs(output_root)
        output_filename = args.model_file.split('/')[-2]
        output = os.path.join(output_root, '.'.join((output_filename, 'pkl')))

        with open(output, 'wb') as f:
            pickle.dump(rob, f, pickle.HIGHEST_PROTOCOL)