Example #1
def train(epochs, batch_size, learning_rate):

    train_loader = torch.utils.data.DataLoader(SegThorDataset(
        "data",
        phase='train',
        transform=transforms.Compose([Rescale(0.25),
                                      Normalize(),
                                      ToTensor()]),
        target_transform=transforms.Compose([Rescale(0.25),
                                             ToTensor()])),
                                               batch_size=batch_size,
                                               shuffle=True)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = UNet().to(device)
    model.apply(weight_init)
    #optimizer = optim.Adam(model.parameters(), lr=learning_rate)    #learning rate to 0.001 for initial stage
    optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.95)
    #optimizer = adabound.AdaBound(params = model.parameters(), lr = 0.001, final_lr = 0.1)

    for epoch in range(epochs):
        print('Epoch {}/{}'.format(epoch + 1, epochs))
        print('-' * 10)

        running_loss = 0.0
        loss_seg = np.zeros(5)

        for batch_idx, (train_data, labels) in enumerate(train_loader):
            train_data = train_data.to(device, dtype=torch.float)
            labels = labels.to(device, dtype=torch.uint8)

            print("train data size", train_data.size())
            print("label size", labels.size())
            optimizer.zero_grad()
            output = model(train_data)

            print("output: {} and taget: {}".format(output.size(),
                                                    labels.size()))
            loss_label, loss = dice_loss(output, labels)
            loss.backward()
            optimizer.step()

            running_loss += loss.item()

            for i in range(5):  # one Dice term per class, including the aorta
                loss_seg[i] += loss_label[i]

        print("Length: ", len(train_loader))
        epoch_loss = running_loss / len(train_loader)
        epoch_loss_class = np.true_divide(loss_seg, len(train_loader))
        print(
            "Dice per class: Background = {:.4f} Esophagus = {:.4f}  Heart = {:.4f}  Trachea = {:.4f}  Aorta = {:.4f}\n"
            .format(epoch_loss_class[0], epoch_loss_class[1],
                    epoch_loss_class[2], epoch_loss_class[3],
                    epoch_loss_class[4]))
        print("Total Dice Loss: {:.4f}\n".format(epoch_loss))

    os.makedirs("models", exist_ok=True)
    torch.save(model, "models/model.pt")
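
Example #1 relies on two helpers the listing does not include, dice_loss and weight_init. Below is a minimal sketch of what they might look like, assuming labels arrive as (N, H, W) class indices and dice_loss returns a detached per-class loss vector plus the mean soft-Dice loss over the 5 SegTHOR classes; names and signatures are inferred from the call sites, not taken from the original project.

import torch
import torch.nn as nn
import torch.nn.functional as F

def dice_loss(output, labels, num_classes=5, eps=1e-6):
    # soft Dice: one term per class, plus the mean as the scalar training loss
    labels_1h = F.one_hot(labels.long(), num_classes).permute(0, 3, 1, 2).float()
    probs = F.softmax(output, dim=1)
    dims = (0, 2, 3)
    intersection = (probs * labels_1h).sum(dims)
    union = probs.sum(dims) + labels_1h.sum(dims)
    per_class = 1.0 - (2 * intersection + eps) / (union + eps)
    return per_class.detach(), per_class.mean()

def weight_init(m):
    # Kaiming initialization for conv layers, a common choice for U-Nets
    if isinstance(m, nn.Conv2d):
        nn.init.kaiming_normal_(m.weight)
        if m.bias is not None:
            nn.init.zeros_(m.bias)
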
Example #2
    def __init__(self, model_path, device="cpu"):
        super().__init__(model_path, device)

        self.vocab = Vocabulary()
        self.races = Races()
        self.genders = Genders()
        self.to_tensor = ToTensor()

        self.name_transform = Compose([self.vocab, OneHot(self.vocab.size), ToTensor()])
        self.race_transform = Compose([self.races, OneHot(self.races.size), ToTensor()])
        self.gender_transform = Compose([self.genders, OneHot(self.genders.size), ToTensor()])
Example #3
    def init_dataset(self):
        return DnDCharacterNameDataset(
            root_dir=self.root_dir,
            name_transform=Compose(
                [self.vocab, OneHot(self.vocab.size), ToTensor()]),
            race_transform=Compose(
                [self.races, OneHot(self.races.size), ToTensor()]),
            gender_transform=Compose(
                [self.genders, OneHot(self.genders.size), ToTensor()]),
            target_transform=Compose([self.vocab, ToTensor()]))
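
Examples #2 and #3 compose OneHot and ToTensor over vocabulary-style encoders (Vocabulary, Races, Genders). A minimal sketch of compatible transforms, assuming each encoder maps a string to a list of integer indices; these definitions are illustrative, not the project's own.

import numpy as np
import torch

class OneHot:
    # indices of length seq_len -> (seq_len, size) one-hot float array
    def __init__(self, size):
        self.size = size

    def __call__(self, indices):
        onehot = np.zeros((len(indices), self.size), dtype=np.float32)
        onehot[np.arange(len(indices)), indices] = 1.0
        return onehot

class ToTensor:
    # numpy array or index list -> torch tensor
    def __call__(self, x):
        return torch.as_tensor(x)
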
Example #4
def test():
    test_loader = DataLoader(NucleusDataset(
        'data',
        train=False,
        transform=transforms.Compose([Normalize(),
                                      Rescale(256),
                                      ToTensor()])),
                             batch_size=12,
                             shuffle=True)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model = torch.load("models/model.pt", map_location=device)
    model.eval()
    model.to(device)

    with torch.no_grad():
        images = next(iter(test_loader)).to(device)

        outputs = model(images)

        images = tensor_to_numpy(images)
        outputs = tensor_to_numpy(outputs)

        show_images(images, outputs)
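
tensor_to_numpy is not shown either; it presumably moves a batch off the GPU and back to HWC layout for plotting. A plausible one-liner version, offered as an assumption since the helper is not part of the listing:

def tensor_to_numpy(batch):
    # (N, C, H, W) tensor on any device -> (N, H, W, C) numpy array
    return batch.detach().cpu().numpy().transpose(0, 2, 3, 1)
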
Example #5
def exp(net, model_name, attack, test_dataset, device):
    original_net = None
    image_size = (128, 128)
    if model_name == 'baseline':
        original_net = BaseCNN()
    elif model_name == 'nvidia':
        original_net = Nvidia()
    elif model_name == 'vgg16':
        original_net = Vgg16()
    original_net.load_state_dict(torch.load(model_name + '.pt'))
    original_net = original_net.to(device)
    original_net.eval()

    test_y = pd.read_csv('ch2_final_eval.csv')['steering_angle'].values
    test_composed = transforms.Compose(
        [Rescale(image_size),
         Preprocess('baseline'),
         ToTensor()])
    # rebuilds the dataset, shadowing the test_dataset argument;
    # dataset_path is assumed to be defined at module level
    test_dataset = UdacityDataset(dataset_path, ['testing'], test_composed,
                                  'test')
    test_generator = DataLoader(test_dataset, batch_size=64, shuffle=False)
    target = 0.3
    ast_ori, _ = fgsm_ex(test_generator, original_net, model_name, target,
                         device, len(test_dataset))
    ast_dist, _ = fgsm_ex(test_generator, net, model_name, target, device,
                          len(test_dataset))
    print('fgsm:', ast_ori, ast_dist)

    advt_model = model_name + '_' + attack
    ast_ori, _ = advGAN_ex(test_generator, original_net, model_name, target,
                           device, len(test_dataset))
    ast_dist, _ = advGAN_ex(test_generator, net, advt_model, target, device,
                            len(test_dataset))
    print('advGAN:', ast_ori, ast_dist)

    ast_ori, _ = advGAN_uni_ex(test_generator, original_net, model_name,
                               target, device, len(test_dataset))
    ast_dist, _ = advGAN_uni_ex(test_generator, net, advt_model, target,
                                device, len(test_dataset))
    print('advGAN_uni:', ast_ori, ast_dist)

    ast_ori, _ = opt_uni_ex(test_generator, original_net, model_name, target,
                            device, len(test_dataset))
    ast_dist, _ = opt_uni_ex(test_generator, net, advt_model, target, device,
                             len(test_dataset))
    print('opt_uni:', ast_ori, ast_dist)

    ast_ori, _ = opt_ex(test_dataset, original_net, model_name, target, device,
                        len(test_dataset))
    ast_dist, _ = opt_ex(test_dataset, net, model_name, target, device,
                         len(test_dataset))
    print('opt:', ast_ori, ast_dist)
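
The *_ex helpers used above return an attack success rate ("ast") plus the raw prediction shifts; none of them is included in the listing. Below is a sketch of what an FGSM-based one could compute, assuming samples are dicts with an 'image' key and success means the steering prediction moves by at least target. Every name and parameter below is an assumption inferred from the call sites.

import numpy as np
import torch
import torch.nn.functional as F

def fgsm_ex(loader, net, model_name, target, device, num_samples, epsilon=0.01):
    # fraction of images whose prediction shifts by >= target under one FGSM step;
    # model_name is kept only to mirror the original signature
    success, all_diff = 0, []
    for batch in loader:
        x = batch['image'].type(torch.FloatTensor).to(device)
        x.requires_grad_(True)
        y_clean = net(x)
        # drive the prediction towards y + target, then step down the loss surface
        loss = F.mse_loss(y_clean, y_clean.detach() + target)
        net.zero_grad()
        loss.backward()
        adv_x = torch.clamp(x - epsilon * x.grad.sign(), 0, 1)
        diff = (net(adv_x) - y_clean).detach().cpu().numpy().squeeze()
        success += int((np.abs(diff) >= abs(target)).sum())
        all_diff.append(np.atleast_1d(diff))
    return success / num_samples, np.concatenate(all_diff)
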
Example #6
def main(args):
    if args.gpus is not None:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpus
        print('Using {} GPUs'.format(args.gpus))

    train_transform = Compose(
        [Resize(args.input_size),
         ToTensor(),
         Norm(mean=(123, 117, 104))])
    trainset = VOCDetection(args.data_root,
                            args.train_set,
                            transform=train_transform,
                            do_norm=True)
    train_loader = torch.utils.data.DataLoader(trainset,
                                               shuffle=True,
                                               batch_size=args.batch_size,
                                               num_workers=args.workers,
                                               collate_fn=detection_collate)

    model = build_ssd(cfg)
    if not args.checkpoint and args.pretrain:
        print('load pretrain model: {}'.format(args.pretrain))
        model.load_weight(args.pretrain)
    if args.gpus:
        model = torch.nn.DataParallel(model).cuda()
    criterion = multibox_loss.MultiboxLoss(args.num_classes,
                                           args.neg_pos_ratio)
    optimizer = optim.SGD(model.parameters(),
                          lr=args.lr,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)
    args.start_epoch = 0

    if args.checkpoint:
        print('=> loading checkpoint from {}...'.format(args.checkpoint))
        state = torch.load(args.checkpoint)
        args.start_epoch = state['epoch']
        model.load_state_dict(state['model'])
        optimizer.load_state_dict(state['optimizer'])

    for epoch in range(args.start_epoch, args.epochs):
        train(train_loader, model, criterion, optimizer, epoch, args)

        state = {
            'epoch': epoch + 1,
            'model': model.state_dict(),
            'optimizer': optimizer.state_dict()
        }
        # save checkpoint
        os.makedirs(args.checkpoint_dir, exist_ok=True)
        checkpoint_file = os.path.join(
            args.checkpoint_dir,
            'checkpoint_epoch_{:04d}.pth.tar'.format(state['epoch']))
        torch.save(state, checkpoint_file)
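
collate_fn=detection_collate is needed here because every image carries a different number of ground-truth boxes, so the default collate could not stack the targets. A minimal sketch, assuming each sample is an (image, target) pair with target of shape (num_boxes, 5) in the usual SSD convention; the repo's actual implementation is not shown.

import torch

def detection_collate(batch):
    # stack images into one tensor; keep the variable-length box lists as-is
    images, targets = [], []
    for image, target in batch:
        images.append(image)
        targets.append(torch.as_tensor(target, dtype=torch.float32))
    return torch.stack(images, dim=0), targets
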
Example #7
def train(epochs, batch_size, learning_rate):
    train_loader = torch.utils.data.DataLoader(
        NucleusDataset("data", train=True,
                       transform=transforms.Compose([
                           Normalize(),
                           Rescale(256),
                           ToTensor()
                       ]),
                       target_transform=transforms.Compose([
                           Normalize(),
                           Rescale(256),
                           ToTensor()
                       ])),
        batch_size=batch_size, shuffle=True)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = UNet().to(device)
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)

    for epoch in range(epochs):
        print('Epoch {}/{}'.format(epoch + 1, epochs))
        print('-' * 10)

        running_loss = 0.0
        for batch_idx, (images, masks) in enumerate(train_loader):
            images, masks = images.to(device), masks.to(device)

            optimizer.zero_grad()

            output = model(images)
            loss = F.binary_cross_entropy(output, masks)
            loss.backward()
            optimizer.step()

            running_loss += loss.item()

        epoch_loss = running_loss / len(train_loader)
        print("Loss: {:.4f}\n".format(epoch_loss))

    os.makedirs("models", exist_ok=True)
    torch.save(model, "models/model.pt")
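
Several examples (1, 4, 7) compose custom Normalize, Rescale, and ToTensor transforms rather than torchvision's. A minimal sketch of compatible callables, assuming samples are HWC numpy images; the project's own versions are not included in the listing.

import cv2
import numpy as np
import torch

class Rescale:
    # accepts a float scale (Rescale(0.25)), an int square size (Rescale(256)),
    # or an (h, w) tuple (Rescale((128, 128)))
    def __init__(self, output_size):
        self.output_size = output_size

    def __call__(self, image):
        if isinstance(self.output_size, float):
            h, w = image.shape[:2]
            size = (int(w * self.output_size), int(h * self.output_size))
        elif isinstance(self.output_size, int):
            size = (self.output_size, self.output_size)
        else:
            size = (self.output_size[1], self.output_size[0])  # cv2 wants (w, h)
        return cv2.resize(image, size)

class Normalize:
    # scale 8-bit pixel values into [0, 1]
    def __call__(self, image):
        return image.astype(np.float32) / 255.0

class ToTensor:
    # HWC numpy image -> CHW float tensor
    def __call__(self, image):
        return torch.from_numpy(image.transpose(2, 0, 1).copy()).float()
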
Example #8
def ex2_fun(gen_model, test_model, device):
    full_indices = list(range(5614))
    test_indices = list(np.random.choice(5614, int(0.2*5614), replace=False))
    root_dir = '../udacity-data'
    (gen_model_name, gen_net) = gen_model
    (test_model_name, test_net) = test_model
    image_size = (128, 128)
    composed = transforms.Compose([Rescale((image_size[1], image_size[0])), Preprocess(), ToTensor()])
    full_dataset = UdacityDataset(root_dir, ['testing'], composed, type_='test')
    dataset = full_dataset
    adv_root_path = '../udacity-data/adv_testing/'
    target = 0.3
    success = []
    attacks = ('fgsm_attack', 'opt_attack', 'universal_attack', 'advGAN_attack', 'advGAN_universal_attack')

    noise_u = np.load(gen_model_name + '_universal_attack_noise.npy')
    noise_u = torch.from_numpy(noise_u).type(torch.FloatTensor).to(device)

    advGAN_generator = Generator(3,3, gen_model_name).to(device)
    advGAN_uni_generator = Generator(3,3, gen_model_name).to(device)

    advGAN_generator.load_state_dict(torch.load('./models/' + gen_model_name + '_netG_epoch_60.pth'))    
    advGAN_uni_generator.load_state_dict(torch.load('./models/' + gen_model_name + '_universal_netG_epoch_60.pth'))    
    noise_seed = np.load(gen_model_name + '_noise_seed.npy')
    noise_a = advGAN_generator(torch.from_numpy(noise_seed).type(torch.FloatTensor).to(device))
    for attack in attacks:
        total_diff = np.array([])
        adv_test_path = adv_root_path + gen_model_name + '/' + attack + '/testing/npy/'
        data_loader = iter(DataLoader(full_dataset, batch_size=64, shuffle=False))

        for i in range(88):  # ceil(5614 / 64) batches over the test split
            adv_image = np.load(adv_test_path + 'batch_' + str(i) + '.npy')
            adv_image = torch.from_numpy(adv_image)
            adv_image = adv_image.type(torch.FloatTensor)
            adv_image = adv_image.to(device)

            ori_image = next(data_loader)['image']
            ori_image = ori_image.type(torch.FloatTensor)
            ori_image = ori_image.to(device)

            ori_y = test_net(ori_image)
            adv_y = test_net(adv_image)
            diff = (adv_y - ori_y).detach().cpu().numpy()
            diff = np.squeeze(diff)
            total_diff = np.concatenate((total_diff, diff))
        success_ = len(total_diff[abs(total_diff) >= target]) 
        print(np.mean(total_diff))
        print('test ' + gen_model_name + ' ' + attack + ' adv_image on ' +  test_model_name + ' model:', success_ / 5614)
        success.append(success_ / 5614)

    return success
def arg_parse():
    # reconstructed header: the source excerpt starts mid-function
    parser = argparse.ArgumentParser()
    parser.add_argument('--premodel',
                        help="premodel to use, alex or vgg or dense",
                        default='alex',
                        type=str,
                        choices=['alex', 'vgg', 'dense'])
    return parser


if __name__ == '__main__':
    parser = arg_parse()
    args = parser.parse_args()
    print(args)
    data = PlacePulseDataset(
        args.csv, args.dataset,
        transforms.Compose([Rescale((224, 224)),
                            ToTensor()]), args.attribute)
    len_data = len(data)
    train_len = int(len_data * 0.65)
    val_len = int(len_data * 0.05)
    test_len = len_data - train_len - val_len
    train, val, test = random_split(data, [train_len, val_len, test_len])
    print(len(test))
    dataloader = DataLoader(test,
                            batch_size=args.batch_size,
                            shuffle=True,
                            num_workers=args.num_workers)
    if args.cuda:
        device = torch.device("cuda:{}".format(args.cuda_id)
                              if torch.cuda.is_available() else "cpu")
    else:
        device = torch.device("cpu")

    dataset_path = '../udacity-data'
    test_composed = transforms.Compose(
        [Rescale((128, 128)),
         Preprocess('baseline'),
         ToTensor()])
    test_dataset = UdacityDataset(dataset_path, ['testing'], test_composed,
                                  'test')
    test_generator = DataLoader(test_dataset, batch_size=1, shuffle=False)


def cal_detection_rate():
    for model_name in ['baseline', 'nvidia', 'vgg16']:
        # model_name = 'vgg16'
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        if 'baseline' in model_name:
            net = BaseCNN()
        elif 'nvidia' in model_name:
            net = Nvidia()
        elif 'vgg16' in model_name:
            net = Vgg16(False)

        net.load_state_dict(torch.load(model_name + '.pt')) 
        net.eval()
        net = net.to(device)
        dataset_path = '../udacity-data'
        root_dir = dataset_path
        test_composed = transforms.Compose([Rescale((128, 128)), Preprocess('baseline'), ToTensor()])
        image_size = (128, 128)
        full_dataset = UdacityDataset(root_dir, ['testing'], test_composed, type_='test')
        full_indices = list(range(5614))
        test_indices = list(np.random.choice(5614, int(0.2*5614), replace=False))
        train_indices = list(set(full_indices).difference(set(test_indices)))
        train_dataset = torch.utils.data.Subset(full_dataset, train_indices)
        test_dataset = torch.utils.data.Subset(full_dataset, test_indices)
        test_data_loader = DataLoader(full_dataset, batch_size=1, shuffle=False)
        num_sample = len(full_dataset)
        target = 0.3
        print('threshold', 0.01)
        attack_detection(model_name, net, test_data_loader, attack='fgsm', threshold=0.01)
        attack_detection(model_name, net, test_data_loader, attack='advGAN', threshold=0.01)
        attack_detection(model_name, net, test_data_loader, attack='advGAN_uni', threshold=0.01)
        attack_detection(model_name, net, test_data_loader, attack='opt_uni', threshold=0.01)
        attack_detection(model_name, net, test_data_loader, attack='opt', threshold=0.01)
Example #12
    new_puzzle_state_dict = {}  # reconstructed: initialized above the excerpt in the source
    for key in puzzle_state_dict:
        if key.startswith('vnet.'):
            new_puzzle_state_dict[key[len('vnet.'):]] = puzzle_state_dict[key]  # strip the 'vnet.' prefix

    state_dict = net.state_dict()
    state_dict.update(new_puzzle_state_dict)
    net.load_state_dict(state_dict)

    if sys.argv[2] == 'train':
        net_parallel = nn.DataParallel(net)
        dataset = NIHDataset(list_file=list_path,
                             root_dir=train_data_path,
                             transform=transforms.Compose([
                                 RandomCrop(patch_size),
                                 ToTensor(),
                             ]))
        dataloader = DataLoader(dataset,
                                batch_size=4,
                                shuffle=True,
                                num_workers=4)

        net_parallel.train()

        optimizer = optim.SGD(net_parallel.parameters(),
                              lr=base_lr,
                              momentum=0.9,
                              weight_decay=0.00004)
        iter_num = 0
        while True:
            for i_batch, sampled_batch in enumerate(dataloader):
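
The source cuts Example #12 off inside the epoch loop. A plausible continuation of the loop body, with the loss and stopping rule assumed rather than taken from the original (F is torch.nn.functional; max_iterations is an assumed hyperparameter):

                volume = sampled_batch['image'].cuda()   # assumed batch keys
                label = sampled_batch['label'].cuda()

                output = net_parallel(volume)
                loss = F.cross_entropy(output, label.long())  # assumed loss; the original is not shown

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

                iter_num += 1
                if iter_num % 100 == 0:
                    print('iteration {}: loss {:.4f}'.format(iter_num, loss.item()))
                if iter_num >= max_iterations:  # assumed cap; the outer while True needs a matching break
                    break
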
Example #13
def test_on_gen(net, model_name, dataset_path, attack, device):
    original_net = None
    if model_name == 'baseline':
        original_net = BaseCNN()
    elif model_name == 'nvidia':
        original_net = Nvidia()
    elif model_name == 'vgg16':
        original_net = build_vgg16(False)
    original_net.load_state_dict(torch.load(model_name + '.pt'))
    original_net = original_net.to(device)
    original_net.eval()

    test_y = pd.read_csv('ch2_final_eval.csv')['steering_angle'].values
    test_composed = transforms.Compose(
        [Rescale(image_size),
         Preprocess('baseline'),
         ToTensor()])
    test_dataset = UdacityDataset(dataset_path, ['testing'], test_composed,
                                  'test')
    test_generator = DataLoader(test_dataset, batch_size=1, shuffle=False)
    with torch.no_grad():
        # test on original dataset
        yhat = []
        y_original = []
        # test_y = []

        for _, sample_batched in enumerate(test_generator):
            batch_x = sample_batched['image']
            batch_y = sample_batched['steer']
            batch_x = batch_x.type(torch.FloatTensor)
            batch_y = batch_y.type(torch.FloatTensor)
            batch_x = batch_x.to(device)
            batch_y = batch_y.to(device)

            output = net(batch_x)
            output_ori = original_net(batch_x)
            # print(output.item(), batch_y.item())
            yhat.append(output.item())
            y_original.append(output_ori.item())
        yhat = np.array(yhat)
        y_original = np.array(y_original)
        rmse = np.sqrt(np.mean((yhat - test_y)**2))
        rmse_ori = np.sqrt(np.mean((y_original - test_y)**2))
        print('adv model on ori dataset:', rmse, 'ori model on ori dataset: ',
              rmse_ori)
        plt.figure(figsize=(32, 8))
        plt.plot(test_y, 'r.-', label='target')
        plt.plot(yhat, 'b^-', label='predict')
        plt.legend(loc='best')
        plt.title("RMSE: %.2f" % rmse)
        # plt.show()
        model_fullname = "%s_%d.png" % (model_name + '_' + attack,
                                        int(time.time()))
        plt.savefig(model_fullname)

    test_generator = DataLoader(test_dataset, batch_size=64, shuffle=False)
    target = 0.3

    success = 0
    success_ = 0
    # test adv_training model on adv images generated based on the original model
    for _, sample_batched in enumerate(test_generator):
        batch_x = sample_batched['image']
        batch_x = batch_x.type(torch.FloatTensor)
        batch_x = batch_x.to(device)
        y_pred = net(batch_x)
        y_pred_ori = original_net(batch_x)
        # fgsm_attack
        adv_x = fgsm_attack_(original_net, batch_x, target, device)
        y_fgsm = net(adv_x)
        y_ori_fgsm = original_net(adv_x)
        diff = abs(y_fgsm - y_pred)
        success += len(diff[diff >= abs(target)])

        diff = abs(y_ori_fgsm - y_pred_ori)
        success_ += len(diff[diff >= abs(target)])
    print('fgsm', success / len(test_dataset), success_ / len(test_dataset))


    # opt universal attack
    noise_u = np.load(model_name + '_universal_attack_noise.npy')
    noise_u = torch.from_numpy(noise_u).type(torch.FloatTensor).to(device)
    success = 0
    success_ = 0
    for _, sample_batched in enumerate(test_generator):
        batch_x = sample_batched['image']
        batch_x = batch_x.type(torch.FloatTensor)
        batch_x = batch_x.to(device)
        y_pred = net(batch_x)
        y_pred_ori = original_net(batch_x)

        # adv_x = fgsm_attack_(original_net, batch_x, target, device)
        # noise = advGAN_generator(batch_x)
        perturbed_image = batch_x + noise_u
        adv_x = torch.clamp(perturbed_image, 0, 1)
        y_fgsm = net(adv_x)
        y_ori_fgsm = original_net(adv_x)
        diff = abs(y_fgsm - y_pred)
        success += len(diff[diff >= abs(target)])

        diff = abs(y_ori_fgsm - y_pred_ori)
        success_ += len(diff[diff >= abs(target)])
    print('opt uni', success / len(test_dataset), success_ / len(test_dataset))

    # test for advGAN attack
    success = 0
    success_ = 0
    advGAN_generator = Generator(3, 3, model_name).to(device)
    advGAN_generator.load_state_dict(
        torch.load('./models/' + model_name + '_netG_epoch_60.pth'))

    for _, sample_batched in enumerate(test_generator):
        batch_x = sample_batched['image']
        batch_x = batch_x.type(torch.FloatTensor)
        batch_x = batch_x.to(device)
        y_pred = net(batch_x)
        y_pred_ori = original_net(batch_x)

        # adv_x = fgsm_attack_(original_net, batch_x, target, device)
        noise = advGAN_generator(batch_x)
        perturbed_image = batch_x + torch.clamp(noise, -0.3, 0.3)
        adv_x = torch.clamp(perturbed_image, 0, 1)
        y_fgsm = net(adv_x)
        y_ori_fgsm = original_net(adv_x)
        diff = abs(y_fgsm - y_pred)
        success += len(diff[diff >= abs(target)])

        diff = abs(y_ori_fgsm - y_pred_ori)
        success_ += len(diff[diff >= abs(target)])
    print('advGAN', success / len(test_dataset), success_ / len(test_dataset))

    # test for advGAN uni attack

    advGAN_uni_generator = Generator(3, 3, model_name).to(device)
    advGAN_uni_generator.load_state_dict(
        torch.load('./models/' + model_name + '_universal_netG_epoch_60.pth'))
    noise_seed = np.load(model_name + '_noise_seed.npy')
    noise_a = advGAN_uni_generator(
        torch.from_numpy(noise_seed).type(torch.FloatTensor).to(device))
    success = 0
    success_ = 0
    for _, sample_batched in enumerate(test_generator):
        batch_x = sample_batched['image']
        batch_x = batch_x.type(torch.FloatTensor)
        batch_x = batch_x.to(device)
        y_pred = net(batch_x)
        y_pred_ori = original_net(batch_x)

        # adv_x = fgsm_attack_(original_net, batch_x, target, device)
        # noise = advGAN_generator(batch_x)
        perturbed_image = batch_x + torch.clamp(noise_a, -0.3, 0.3)
        adv_x = torch.clamp(perturbed_image, 0, 1)
        y_fgsm = net(adv_x)
        y_ori_fgsm = original_net(adv_x)
        diff = abs(y_fgsm - y_pred)
        success += len(diff[diff >= abs(target)])

        diff = abs(y_ori_fgsm - y_pred_ori)
        success_ += len(diff[diff >= abs(target)])
    print('advGAN uni', success / len(test_dataset),
          success_ / len(test_dataset))
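
fgsm_attack_, used throughout Example #13, is another helper the listing omits. A minimal sketch, assuming it perturbs a batch so the regressor's output drifts towards y + target and clamps to the valid pixel range; the signature is inferred from the call sites and epsilon is an assumed step size.

import torch
import torch.nn.functional as F

def fgsm_attack_(net, x, target, device, epsilon=0.01):
    x = x.clone().detach().to(device).requires_grad_(True)
    y = net(x)
    loss = F.mse_loss(y, y.detach() + target)
    net.zero_grad()
    loss.backward()
    # one signed-gradient step, then clamp back into [0, 1]
    return torch.clamp(x - epsilon * x.grad.sign(), 0, 1).detach()
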
Example #14
    models_name = ['baseline', 'nvidia', 'vgg16']

    adv_datasets = '../udacity-data/adv_data'
    #attacks = [ 'universal_attack',  'advGAN_universal_attack']
    #attacks = ['opt_attack', 'fgsm_attack','advGAN_attack','universal_attack',  'advGAN_universal_attack']
    attacks = ['fgsm_attack']
    dataset_path = '../udacity-data/'
    if test:
        full_indices = list(range(5614))
        test_indices = list(
            np.random.choice(5614, int(0.2 * 5614), replace=False))
        train_indices = list(set(full_indices).difference(set(test_indices)))
        test_composed = transforms.Compose([
            Rescale((image_size[1], image_size[0])),
            Preprocess(),
            ToTensor()
        ])

        full_dataset = UdacityDataset(dataset_path, ['testing'],
                                      test_composed,
                                      type_='test')

        train_dataset = torch.utils.data.Subset(full_dataset, train_indices)
        test_dataset = torch.utils.data.Subset(full_dataset, test_indices)
        net = Vgg16()
        # net.load_state_dict(torch.load('adv_training_models/' + 'nvidia' + '_' + 'fgsm_attack' +  '.pt'))
        net.load_state_dict(
            torch.load('adv_training_models/vgg16_fgsm_attack.pt'))

        net = net.to(device)
        net.eval()
if __name__ == "__main__":
    models_name = ['nvidia', 'baseline', 'vgg16']
    image_size = (128, 128)
    batch_size = 32
    epochs = 15
    train = 1
    dataset_path = '../udacity-data'
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    composed = transforms.Compose([
        Rescale(image_size),
        RandFlip(),
        RandRotation(),
        Preprocess(model_name),  # model_name is only bound in the loop below; this Compose likely belongs inside it
        ToTensor()
    ])
    dataset = UdacityDataset(dataset_path,
                             ['HMB1', 'HMB2', 'HMB4', 'HMB5', 'HMB6'],
                             composed)
    train_generator = DataLoader(dataset,
                                 batch_size=batch_size,
                                 shuffle=True,
                                 num_workers=8)

    steps_per_epoch = int(len(dataset) / batch_size)

    T = [5, 10, 15, 20, 25]
    #T = [25]
    for model_name in models_name:
        for t in T:
Example #16
def experiment_1():
    device = torch.device("cuda" if (torch.cuda.is_available()) else "cpu")
    target_models = []
    basecnn = 'baseline.pt'
    nvidia = 'nvidia.pt'
    vgg16 = 'vgg16.pt'
    model1 = BaseCNN()
    model1.to(device)
    model1.load_state_dict(torch.load(basecnn))
    model1.eval()   
    model2 = Nvidia()
    model2.to(device)
    model2.load_state_dict(torch.load(nvidia))
    model2.eval()
    model3 = Vgg16()
    model3.to(device)
    model3.load_state_dict(torch.load(vgg16))
    model3.eval()
    target_models.append(('baseline', model1))
    # target_models.append(('vgg16', model3))
    # target_models.append(('nvidia', model2))

    root_dir = '../udacity-data'
    target = 0.3
    attacks = ('FGSM', 'Optimization', 'Optimization Universal', 'AdvGAN', 'AdvGAN Universal')
    fgsm_result = []
    opt_result = []
    optu_result = []
    advGAN_result = []
    advGANU_result = []
    fgsm_diff = []
    opt_diff = []
    optu_diff = []
    advGAN_diff = []
    advGANU_diff = []

    full_indices = list(range(5614))
    test_indices = list(np.random.choice(5614, int(0.2*5614), replace=False))
    train_indices = list(set(full_indices).difference(set(test_indices)))
    image_size = (128, 128)
    test_composed = transforms.Compose([Rescale((image_size[1], image_size[0])), Preprocess(), ToTensor()])
    full_dataset = UdacityDataset(root_dir, ['testing'], test_composed, type_='test')

    train_dataset = torch.utils.data.Subset(full_dataset, train_indices)
    test_dataset = torch.utils.data.Subset(full_dataset, test_indices)

    for (model_name, model) in target_models:

        test_data_loader = torch.utils.data.DataLoader(full_dataset, batch_size=64, shuffle=False)
        num_sample = len(full_dataset)
        # universal perturbation generation
        if not os.path.exists(model_name + '_universal_attack_noise.npy'):
            print('Start universal attack training')
            perturbation = generate_noise(train_dataset, model, model_name, device, target)
            np.save(model_name + '_universal_attack_noise', perturbation)
            print('Finish universal attack training.')

        # advGAN training
        if not os.path.exists('./models/' + model_name + '_netG_epoch_60.pth'):
            print('Start advGAN training')
            advGAN = advGAN_Attack(model_name, model_name + '.pt', target + 0.2, train_dataset)
            torch.save(advGAN.netG.state_dict(), './models/' + model_name +'_netG_epoch_60.pth')
            print('Finish advGAN training')

        # advGAN_uni training
        if not os.path.exists('./models/' + model_name + '_universal_netG_epoch_60.pth'):
            print('Start advGAN_uni training')
            advGAN_uni = advGAN_Attack(model_name, model_name + '.pt', target + 0.2, train_dataset, universal=True)
            advGAN_uni.save_noise_seed(model_name + '_noise_seed.npy')

            torch.save(advGAN_uni.netG.state_dict(), './models/' + model_name +'_universal_netG_epoch_60.pth')
            print('Finish advGAN_uni training')

        print("Testing: " + model_name)
        # fgsm attack
        fgsm_ast, diff = fgsm_ex(test_data_loader, model, model_name, target, device, num_sample, image_size)
        print(fgsm_ast)
        fgsm_result.append(fgsm_ast)
        fgsm_diff.append(diff)
        # optimization attack
        opt_ast, diff = opt_ex(test_dataset, model, model_name, target, device, num_sample, image_size)
        print(opt_ast)
        opt_result.append(opt_ast)
        opt_diff.append(diff)
        # optimized-based universal attack
        optu_ast, diff = opt_uni_ex(test_data_loader, model, model_name, target, device, num_sample, image_size)
        print(optu_ast)
        optu_result.append(optu_ast)
        optu_diff.append(diff)
        # advGAN attack
        advGAN_ast, diff = advGAN_ex(test_data_loader, model, model_name, target, device, num_sample, image_size)        
        print(advGAN_ast)
        advGAN_result.append(advGAN_ast)
        advGAN_diff.append(diff)
        # advGAN_universal attack
        advGANU_ast, diff = advGAN_uni_ex(test_data_loader, model, model_name, target, device, num_sample, image_size)
        print(advGANU_ast)
        advGANU_result.append(advGANU_ast)
        advGANU_diff.append(diff)
Example #17
def ex3_gen_adv(generator, gen_model, device):
    root_dir = '../udacity-data'
    adv_root_dir = '../udacity-data/adv_data'
    device = torch.device("cuda" if (torch.cuda.is_available()) else "cpu")
    image_size = (128, 128)
    test_composed = transforms.Compose([Rescale(image_size), Preprocess(), ToTensor()])
    basecnn = 'baseline.pt'
    nvidia = 'nvidia.pt'
    vgg16 = 'vgg16.pt'
    model1 = BaseCNN()
    model1.to(device)
    model1.load_state_dict(torch.load(basecnn))
    model1.eval() 
    model2 = Nvidia()
    model2.to(device)
    model2.load_state_dict(torch.load(nvidia))
    model2.eval()
    model3 = Vgg16()
    model3.to(device)
    model3.load_state_dict(torch.load(vgg16))
    model3.eval()
    target_models = []
    target_models.append(('baseline', model1))
    #target_models.append(('nvidia', model2))
    target_models.append(('vgg16', model3))
    train = 0    
    attacks = ['advGAN_attack']
    # attacks = ['fgsm_attack', 'universal_attack', 'advGAN_attack', 'advGAN_universal_attack', 'opt_attack',]
    target = 0.3
    if train:
        hmb_list =[('HMB1', 1479424215880976321),('HMB2', 1479424439139199216),('HMB4', 1479425729831388501), ('HMB5', 1479425834048269765), ('HMB6', 1479426202229245710)]
    else:
        hmb_list = [('testing', 1479425441182877835)]
    for (model_name, model) in target_models:
        noise_u = np.load(model_name + '_universal_attack_noise.npy')
        noise_u = torch.from_numpy(noise_u).type(torch.FloatTensor).to(device)

        advGAN_generator = Generator(3,3, model_name).to(device)
        advGAN_uni_generator = Generator(3,3, model_name).to(device)

        advGAN_generator.load_state_dict(torch.load('./models/' + model_name + '_netG_epoch_60.pth'))    
        advGAN_generator.eval()
        advGAN_uni_generator.load_state_dict(torch.load('./models/' + model_name + '_universal_netG_epoch_60.pth'))    
        advGAN_uni_generator.eval()
        noise_seed = np.load(model_name + '_noise_seed.npy')
        noise_a = advGAN_uni_generator(torch.from_numpy(noise_seed).type(torch.FloatTensor).to(device))
        save_dir = os.path.join(adv_root_dir, model_name)
        for (hmb, start) in hmb_list:
            print(model_name, hmb)
            if train:
                train_dataset = UdacityDataset(root_dir, [hmb], test_composed, type_='train')
            else:
                train_dataset = UdacityDataset(root_dir, [hmb], test_composed, type_='test')
            generator = DataLoader(train_dataset, batch_size=64, shuffle=False, num_workers=8)
            for i, batch in enumerate(generator):
                batch_x = batch['image']
                batch_x = batch_x.type(torch.FloatTensor)
                batch_x = batch_x.to(device)

                _, plt, _, perturbed_image = attack_test.fgsm_attack_test(model, batch_x, target, device, image_size=image_size)
                plt.close()
                if train:
                    for j in range(len(perturbed_image)):
                        np.save('../udacity-data/adv_data/' + model_name + '/fgsm_attack/' + hmb + '/' + str(i*64 + start + j), perturbed_image[j,:,:,:])
                else:
                    np.save('../udacity-data/adv_testing/' + model_name + '/fgsm_attack/' + hmb + '/npy/' + 'batch_' + str(i), perturbed_image)

                _, plt, perturbed_image = attack_test.optimized_uni_test(model, batch_x, device, noise_u, image_size=image_size)
                plt.close()

                if train:
                    for j in range(len(perturbed_image)):
                        np.save('../udacity-data/adv_data/' + model_name + '/universal_attack/' + hmb + '/' + str(i*64 + start + j), perturbed_image[j,:,:,:])
                else:
                    np.save('../udacity-data/adv_testing/' + model_name + '/universal_attack/' + hmb + '/npy/' + 'batch_' + str(i), perturbed_image)
        
                _, plt, perturbed_image = attack_test.advGAN_test(model, batch_x, advGAN_generator, device, image_size=image_size)
                plt.close()
                
                if train:
                    for j in range(len(perturbed_image)):
                        np.save('../udacity-data/adv_data/' + model_name + '/advGAN_attack/' + hmb + '/' + str(i*64 + start + j), perturbed_image[j,:,:,:])
                else:
                    np.save('../udacity-data/adv_testing/' + model_name + '/advGAN_attack/' + hmb + '/npy/' + 'batch_' + str(i), perturbed_image)
                
                _, plt, perturbed_image = attack_test.advGAN_uni_test(model, batch_x, device, noise_a, image_size=image_size)
                plt.close()

                if train:
                    for j in range(len(perturbed_image)):
                        np.save('../udacity-data/adv_data/' + model_name + '/advGAN_universal_attack/' + hmb + '/' + str(i*64 + start + j), perturbed_image[j,:,:,:])
                else:
                    np.save('../udacity-data/adv_testing/' + model_name + '/advGAN_universal_attack/' + hmb + '/npy/' + 'batch_' + str(i), perturbed_image)
    for (model_name, model) in target_models:
        for (hmb, start) in hmb_list:
            print(model_name, hmb)
            if train:
                train_dataset = UdacityDataset(root_dir, [hmb], test_composed, type_='train')
            else:
                train_dataset = UdacityDataset(root_dir, [hmb], test_composed, type_='test')
            # npy = np.array([], dtype=np.float64).reshape(1, 3, 128, 128)
            npy = None

            for i in range(0, len(train_dataset)):

                batch_x = train_dataset[i]['image']
                batch_x = batch_x.unsqueeze(0)
                batch_x = batch_x.type(torch.FloatTensor)
                batch_x = batch_x.to(device)
                _, plt, perturbed_image = attack_test.optimized_attack_test(model, batch_x, target, device, image_size=image_size)
                plt.close()
                if train:
                    for j in range(len(perturbed_image)):
                        np.save('../udacity-data/adv_data/' + model_name + '/opt_attack/' + hmb + '/' + str(i*64 + start + j), perturbed_image[j,:,:,:])
                else:
                    if i == 0:
                        npy = perturbed_image
                    elif i % 64 != 0:
                        npy = np.concatenate((npy, perturbed_image))
                    else:
                        np.save('../udacity-data/adv_testing/' + model_name + '/opt_attack/' + hmb + '/npy/' + 'batch_' + str(i // 64 - 1), npy)
                        npy = perturbed_image
            

            if not train:
                np.save('../udacity-data/adv_testing/' + model_name + '/opt_attack/' + hmb + '/npy/' + 'batch_' + str(5614 // 64), npy)
Example #18
cfig = Config()
net = xceptionAx3(num_classes=1)  # create the CNN model
criterion = nn.BCEWithLogitsLoss()  # define the loss

optimizer = optim.SGD(net.parameters(),
                      lr=0.0001,
                      momentum=0.9,
                      weight_decay=0.0001)  # select the optimizer

exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.1)
# create the train_dataset_loader and val_dataset_loader.

train_transformed_dataset = CloudDataset(img_dir='data/images224',
                                         labels_dir='data/masks224/',
                                         transform=transforms.Compose(
                                             [ToTensor()]))

val_transformed_dataset = CloudDataset(img_dir='data/images224',
                                       labels_dir='data/masks224/',
                                       val=True,
                                       transform=transforms.Compose(
                                           [ToTensor()]))

train_dataloader = DataLoader(train_transformed_dataset,
                              batch_size=8,
                              shuffle=True,
                              num_workers=4)

val_dataloader = DataLoader(val_transformed_dataset,
                            batch_size=8,
                            shuffle=True)
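
The example ends after building the loaders. Below is a minimal epoch loop wired to the objects above, assuming each batch is a dict with 'image' and 'mask' entries shaped for BCEWithLogitsLoss; the original training loop is not shown and num_epochs is an assumed setting.

num_epochs = 10  # assumed; a real run would likely read this from cfig
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
net = net.to(device)

for epoch in range(num_epochs):
    net.train()
    running_loss = 0.0
    for batch in train_dataloader:
        images = batch['image'].to(device, dtype=torch.float)  # assumed batch keys
        masks = batch['mask'].to(device, dtype=torch.float)
        optimizer.zero_grad()
        loss = criterion(net(images), masks)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
    exp_lr_scheduler.step()
    print('epoch {}: train loss {:.4f}'.format(epoch + 1, running_loss / len(train_dataloader)))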