Example #1
import logging

import torch.nn as nn
from torchvision.models import alexnet, resnet18

# NormalizeByChannelMeanStd and train() come from the surrounding project
# (a sketch of the former follows this example); gpu, arch and epochs were
# globals in the source and are taken here as parameters.
def main_worker(gpu, arch, epochs):

    if gpu is not None:
        logging.info("Use GPU: {} for training".format(gpu))

    # create model
    logging.info("=> using pre-trained model '{}'".format("alexnet"))
    normalize = NormalizeByChannelMeanStd(mean=[0.485, 0.456, 0.406],
                                          std=[0.229, 0.224, 0.225])

    if arch == "alexnet":
        model = alexnet(pretrained=True)
    elif arch == "resnet":
        model = resnet18(pretrained=True)
    else:
        raise ValueError("unsupported arch: {}".format(arch))

    model.eval()
    model = nn.Sequential(normalize, model)

    model = model.cuda(gpu)

    for epoch in range(epochs):
        # run one epoch
        train(model, epoch)
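
Both examples prepend NormalizeByChannelMeanStd to the network so that input
normalization travels with the model weights. The class itself is
project-specific; a minimal sketch of the likely implementation, per-channel
(x - mean) / std as an nn.Module, is:

import torch
import torch.nn as nn

class NormalizeByChannelMeanStd(nn.Module):
    """Per-channel (x - mean) / std as a module, so it can be composed with
    a model via nn.Sequential. Minimal sketch, not the project's exact code."""
    def __init__(self, mean, std):
        super().__init__()
        # Buffers move with the module across devices but are not trained
        self.register_buffer('mean', torch.tensor(mean).view(1, -1, 1, 1))
        self.register_buffer('std', torch.tensor(std).view(1, -1, 1, 1))

    def forward(self, x):
        # Broadcasts over an NCHW batch
        return (x - self.mean) / self.std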
Example #2
import configparser
import logging
import os

import torch
import torch.nn as nn
import torch.optim as optim
from PIL import Image
from torchvision import transforms

# LabeledDataset, initialize_model, get_architecture, train_model,
# save_checkpoint and NormalizeByChannelMeanStd are helpers defined in the
# surrounding project.
def load_config(cfg_id=1):
    EXPR_CFG_FILE = "cfg/experiment_%d.cfg" % cfg_id

    config = configparser.ConfigParser()
    config.read(EXPR_CFG_FILE)

    experimentID = config["experiment"]["ID"]

    options = config["visualize"]
    clean_data_root = options["clean_data_root"]
    poison_root     = options["poison_root"]
    denoiser_path   = options["denoiser_path"]
    arch            = options["arch"]
    eps             = int(options["eps"])
    noise_sd        = float(options["noise_sd"])
    patch_size      = int(options["patch_size"])
    rand_loc        = options.getboolean("rand_loc")
    trigger_id      = int(options["trigger_id"])
    num_poison      = int(options["num_poison"])
    num_classes     = int(options["num_classes"])
    batch_size      = int(options["batch_size"])

    options = config["poison_generation"]
    target_wnid = options["target_wnid"]
    source_wnid_list = options["source_wnid_list"].format(experimentID)
    num_source = int(options["num_source"])
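
    # A matching cfg/experiment_<id>.cfg would look roughly like this
    # (hypothetical values; the keys mirror the reads above):
    #
    #   [experiment]
    #   ID = example
    #
    #   [visualize]
    #   clean_data_root = data/clean
    #   poison_root     = data/poison
    #   denoiser_path   = denoisers/run0
    #   arch            = alexnet
    #   eps             = 16
    #   noise_sd        = 0.25
    #   patch_size      = 30
    #   rand_loc        = True
    #   trigger_id      = 10
    #   num_poison      = 400
    #   num_classes     = 1000
    #   batch_size      = 64
    #
    #   [poison_generation]
    #   target_wnid = n02085936
    #   source_wnid_list = data/{}/source_wnid_list.txt
    #   num_source = 1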

    feature_extract = True

    # Models to choose from [resnet, alexnet, vgg, squeezenet, densenet, inception]
    model_name = arch

    # Initialize the model for this run
    model_ft, input_size = initialize_model(model_name, num_classes, feature_extract, use_pretrained=True)
    # logging.info(model_ft)

    # Transforms
    data_transforms = transforms.Compose([
            transforms.Resize((input_size, input_size)),
            transforms.ToTensor(),
            ])

    dataset_clean = LabeledDataset(clean_data_root + "/train",
                                   "data/{}/finetune_filelist.txt".format(experimentID), data_transforms)
    dataset_test = LabeledDataset(clean_data_root + "/val",
                                   "data/{}/test_filelist.txt".format(experimentID), data_transforms)
    dataset_patched = LabeledDataset(clean_data_root + "/val",
                                   "data/{}/patched_filelist.txt".format(experimentID), data_transforms)

    dataloaders_dict = {}
    dataloaders_dict['test'] = torch.utils.data.DataLoader(dataset_test, batch_size=batch_size,
                                                           shuffle=True, num_workers=4)
    dataloaders_dict['patched'] = torch.utils.data.DataLoader(dataset_patched, batch_size=batch_size,
                                                              shuffle=False, num_workers=4)
    # 'notpatched' appears to reuse dataset_patched deliberately: same file
    # list, with the trigger presumably stamped on only in the 'patched' pass.
    dataloaders_dict['notpatched'] = torch.utils.data.DataLoader(dataset_patched, batch_size=batch_size,
                                                                 shuffle=False, num_workers=4)

    trans_trigger = transforms.Compose([
        transforms.Resize((patch_size, patch_size)),
        transforms.ToTensor(),
    ])

    trigger = Image.open('data/triggers/trigger_{}.png'.format(trigger_id)).convert('RGB')
    trigger = trans_trigger(trigger).unsqueeze(0).cuda()
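
    # Downstream, the trigger tensor is typically stamped onto an input batch
    # roughly like this (hypothetical usage; offsets are randomized when
    # rand_loc is set):
    #   input[:, :, y:y + patch_size, x:x + patch_size] = trigger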

    checkpointDir = "finetuned_models/" + experimentID + "/" + str(arch) + "/rand_loc_" +  str(rand_loc) + "/eps_" + str(eps) + \
                    "/patch_size_" + str(patch_size) + "/num_poison_" + str(num_poison) + "/trigger_" + str(trigger_id)

    normalize = NormalizeByChannelMeanStd(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    model = nn.Sequential(normalize, model_ft)

    checkpoint = torch.load(os.path.join(checkpointDir, "poisoned_model.pt"), map_location='cuda:0')
    model.load_state_dict(checkpoint['state_dict'])

    classifier = model
    classifier.eval()

    #######################################################################################
    # Load denoiser
    checkpoint = torch.load(os.path.join(denoiser_path, "checkpoint.pth.tar"))
    denoiser = get_architecture(checkpoint['arch'], 'imagenet')
    denoiser.load_state_dict(checkpoint['state_dict'])

    # get_architecture apparently wraps the denoiser in DataParallel, hence .module
    denoised_classifier = torch.nn.Sequential(denoiser.module, model)

    denoised_classifier = torch.nn.DataParallel(denoised_classifier).cuda()
    #######################################################################################

    return dataloaders_dict, classifier, denoised_classifier, trigger
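
A minimal (hypothetical) driver would then obtain the loaders and models like this:

dataloaders_dict, classifier, denoised_classifier, trigger = load_config(cfg_id=1)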

# Fine-tuning setup (module-level snippet; model_ft, feature_extract, lr,
# momentum, gpu, epochs, data_name, model_name and dataloaders_dict are
# assumed to be defined by the surrounding script).
params_to_update = model_ft.parameters()
if feature_extract:
    # With feature extraction, only the newly initialized layers are trainable
    params_to_update = []
    for name, param in model_ft.named_parameters():
        if param.requires_grad:
            params_to_update.append(param)
            logging.info(name)
else:
    for name, param in model_ft.named_parameters():
        if param.requires_grad:
            logging.info(name)

optimizer_ft = optim.SGD(params_to_update, lr=lr, momentum=momentum)

# Set up the loss function
criterion = nn.CrossEntropyLoss()

# Pick normalization constants to match the training data
if data_name.upper() == 'CIFAR':
    normalize = NormalizeByChannelMeanStd(mean=[0.4914, 0.48216, 0.44653],
                                          std=[0.24703, 0.24349, 0.26159])
else:
    normalize = NormalizeByChannelMeanStd(mean=[0.485, 0.456, 0.406],
                                          std=[0.229, 0.224, 0.225])
model = nn.Sequential(normalize, model_ft)
model = model.cuda(gpu)

# Train and evaluate
model, meta_dict = train_model(model,
                               dataloaders_dict,
                               criterion,
                               optimizer_ft,
                               num_epochs=epochs,
                               is_inception=(model_name == "inception"))

save_checkpoint(
    {
        'arch': model_name,
        'state_dict': model.state_dict(),
    }
)
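
To sanity-check the result end to end, a hedged evaluation sketch (assuming
each loader yields plain (inputs, labels) batches; adjust if LabeledDataset
returns extra fields):

def accuracy(model, loader):
    # Fraction of correctly classified samples in the loader
    model.eval()
    correct, total = 0, 0
    with torch.no_grad():
        for inputs, labels in loader:
            inputs, labels = inputs.cuda(), labels.cuda()
            preds = model(inputs).argmax(dim=1)
            correct += (preds == labels).sum().item()
            total += labels.size(0)
    return correct / total

logging.info("test acc: %.4f", accuracy(model, dataloaders_dict['test']))
logging.info("patched acc: %.4f", accuracy(model, dataloaders_dict['patched']))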