Example #1
import os
import sys

import numpy as np
from PIL import Image
from tqdm import tqdm

# Repo-local helpers assumed importable from the surrounding project:
# parse_args, load_image, numpy_to_variable, DIM_tranform.


def main(args=None):
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)
    args_dic = vars(args)

    folder_name = args.AE_folder.split(
        '/')[-1] + '_preprocess_' + args.preprocess_mtd
    output_folder = os.path.join(args.output_dir, folder_name)
    if os.path.exists(output_folder):
        raise ValueError(
            'output folder {0} already exists.'.format(output_folder))
    os.mkdir(output_folder)
    img_list = os.listdir(args.AE_folder)

    if args.preprocess_mtd == 'DIM':
        preprocessor = DIM_tranform(224, 224, 330)
    else:
        raise ValueError(
            'Unsupported preprocess_mtd {0}.'.format(args.preprocess_mtd))

    for img_name in tqdm(img_list):
        image_np = load_image(shape=(224, 224),
                              data_format='channels_first',
                              abs_path=True,
                              fpath=os.path.join(args.AE_folder, img_name))
        image_var = numpy_to_variable(image_np)
        image_processed_var = preprocessor(image_var)
        image_processed_np = image_processed_var[0].detach().cpu().numpy()
        image_processed_pil = Image.fromarray(
            (np.transpose(image_processed_np,
                          (1, 2, 0)) * 255).astype(np.uint8))
        image_processed_pil.save(os.path.join(output_folder, img_name))
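
For reference, a minimal sketch of the two conversion helpers these examples lean on, inferred from how they are used here and below (hypothetical reimplementations; the repo ships its own):

import torch

def numpy_to_variable_sketch(image_np):
    # channels-first float array in [0, 1] -> batched CUDA tensor
    if image_np.ndim == 3:
        image_np = np.expand_dims(image_np, axis=0)
    return torch.from_numpy(image_np).float().cuda()

def variable_to_numpy_sketch(var):
    # batched tensor -> plain numpy array on the CPU
    return var.detach().cpu().numpy()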
Example #2
def __getitem__(self, i):
    index = i % self.len
    image_name = self.image_label_list[index]
    label = [image_name]
    image_path = os.path.join(self.image_dir, image_name)
    img = self.load_data(image_path,
                         self.resize_height,
                         self.resize_width,
                         normalization=False)
    img = numpy_to_variable(img)
    return img, label
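
A usage sketch, assuming this __getitem__ belongs to a torch.utils.data.Dataset-style class (the class name and constructor below are hypothetical):

# dataset = AdvImageDataset(image_dir='...', resize_height=224, resize_width=224)
# img, label = dataset[0]  # img is already a CUDA tensor via numpy_to_variable
# Because numpy_to_variable moves data to the GPU, indexing the dataset
# directly is safer than wrapping it in a multi-worker DataLoader.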
Example #3
import numpy as np
import torchvision
from PIL import Image

from attacks.dispersion import DispersionAttack_gpu
# Repo-local helpers assumed importable from the surrounding project:
# load_image, numpy_to_variable, variable_to_numpy, Vgg16.


def _save_img(imgs_np, file_path):
    img_np = np.transpose((imgs_np[0] * 255).astype(np.uint8), (1, 2, 0))
    img_pil = Image.fromarray(img_np)
    img_pil.save(file_path)
    return

# Candidate attack layer indices:
#   Resnet152: [4, 5, 6, 7]
#   Vgg16: [2, 7, 14, 21, 28]
image_np = np.expand_dims(load_image(data_format='channels_first', fpath='./images/example.png', abs_path=True), axis=0)
image = numpy_to_variable(image_np)
_save_img(image_np, './temp_ori.png')

model = Vgg16()
internal = list(range(29))
attack = DispersionAttack_gpu(model, epsilon=16./255, step_size=1./255, steps=200)
adv = attack(image, attack_layer_idx_list=[14], internal=internal)

adv_np = variable_to_numpy(adv)
_save_img(adv_np, './temp_adv.png')

diff_np = np.abs(image_np - adv_np)
_save_img(diff_np, './temp_diff.png')

diff_amp_np = diff_np / diff_np.max()
_save_img(diff_amp_np, './temp_diff_amp_{0:.2f}.png'.format(1./diff_np.max()))
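# Note: the amplification factor recorded in the file name is 1 / diff_np.max();
# with epsilon = 16/255, diff_np.max() is at most about 0.0627, so the
# perturbation is amplified by at least ~16x before saving to make it visible.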
Example #4
def test(args):
    args_dic = vars(args)

    if args.dataset_type == 'voc':
        gt_dir = os.path.join(args.dataset_dir, '_segmentations')
    elif args.dataset_type == 'coco':
        gt_loader = COCO(
            os.path.join(args.dataset_dir, 'instances_val2017.json'))
        to_voc_21 = _convert_label(VOC_AND_COCO91_CLASSES)
    else:
        raise ValueError('Invalid dataset_type {0}.'.format(args.dataset_type))

    if args.test_model == 'deeplabv3plus':
        args_dic['num_classes'] = 21
        model = DeepLab(num_classes=21,
                        backbone=args.dlv3p_backbone,
                        output_stride=8,
                        sync_bn=False,
                        freeze_bn=True)
        img_size = (513, 513)
        model = model.cuda()
        checkpoint = torch.load(args.pretrained_path)
        model.load_state_dict(checkpoint['state_dict'])
        model.eval()
        #img_transforms = None
        img_mean = [0.485, 0.456, 0.406]
        img_std = [0.229, 0.224, 0.225]
        img_transforms = torchvision.transforms.Compose([
            torchvision.transforms.Resize(img_size),
            torchvision.transforms.ToTensor(),
            torchvision.transforms.Normalize(mean=img_mean, std=img_std)
        ])
    elif args.test_model == 'deeplabv3_resnet101':
        args_dic['num_classes'] = 21
        model = torchvision.models.segmentation.deeplabv3_resnet101(
            pretrained=True, progress=True, num_classes=21)
        model = model.cuda().eval()
        img_size = (1024, 1024)  #(520, 520)
        img_mean = [0.485, 0.456, 0.406]
        img_std = [0.229, 0.224, 0.225]
        img_transforms = torchvision.transforms.Compose([
            torchvision.transforms.Resize(img_size),
            torchvision.transforms.ToTensor(),
            torchvision.transforms.Normalize(mean=img_mean, std=img_std)
        ])
    elif args.test_model == 'fcn_resnet101':
        args_dic['num_classes'] = 21
        model = torchvision.models.segmentation.fcn_resnet101(pretrained=True,
                                                              progress=True,
                                                              num_classes=21)
        model = model.cuda().eval()
        img_size = (1024, 1024)
        img_mean = [0.485, 0.456, 0.406]
        img_std = [0.229, 0.224, 0.225]
        img_transforms = torchvision.transforms.Compose([
            torchvision.transforms.Resize(img_size),
            torchvision.transforms.ToTensor(),
            torchvision.transforms.Normalize(mean=img_mean, std=img_std)
        ])

    else:
        raise ValueError('Invalid test_model {0}.'.format(args.test_model))

    evaluator = Evaluator(args.num_classes)

    test_folders = []
    for temp_folder in os.listdir(args.dataset_dir):
        if not os.path.isdir(os.path.join(args.dataset_dir, temp_folder)):
            continue
        if temp_folder in ('imagenet_val_5000', '.git', '_annotations', '_segmentations'):
            continue
        if len(PICK_LIST) != 0 and temp_folder not in PICK_LIST:
            continue
        if len(BAN_LIST) != 0 and temp_folder in BAN_LIST:
            continue
        test_folders.append(temp_folder)

    result_dict = {}
    for curt_folder in tqdm(test_folders):
        print('Folder : {0}'.format(curt_folder))
        evaluator.reset()
        for adv_name in tqdm(
                os.listdir(os.path.join(args.dataset_dir, curt_folder))):
            temp_image_name_noext = os.path.splitext(adv_name)[0]
            if args.dataset_type == 'voc':
                gt_path = os.path.join(gt_dir, temp_image_name_noext + '.png')

            if curt_folder == 'ori':
                adv_img_path = os.path.join(args.dataset_dir, curt_folder,
                                            temp_image_name_noext + '.jpg')
            else:
                adv_img_path = os.path.join(args.dataset_dir, curt_folder,
                                            temp_image_name_noext + '.png')

            if not os.path.exists(adv_img_path):
                print('File {0} not found.'.format(adv_name))
                continue

            if args.dataset_type == 'voc':
                mask_rgb = np.array(Image.open(gt_path))
                # map the VOC 'ignore' label (255) to background (0)
                mask_rgb[mask_rgb == 255] = 0
                output_ori = cv2.resize(mask_rgb,
                                        img_size,
                                        interpolation=cv2.INTER_NEAREST)
            elif args.dataset_type == 'coco':
                output_ori = load_coco_masks(temp_image_name_noext,
                                             img_size,
                                             gt_loader,
                                             to_voc=to_voc_21)

            if img_transforms is None:
                image_adv_np = load_image(data_format='channels_first',
                                          shape=img_size,
                                          bounds=(0, 1),
                                          abs_path=True,
                                          fpath=adv_img_path)
                image_adv_var = numpy_to_variable(image_adv_np)
                with torch.no_grad():
                    if args.test_model == 'deeplabv3plus':
                        output_adv = model(image_adv_var)
            else:
                image_adv_var = img_transforms(
                    Image.open(adv_img_path).convert('RGB')).unsqueeze_(0).cuda()
                with torch.no_grad():
                    if args.test_model == 'deeplabv3plus':
                        output_adv = model(image_adv_var)
                    else:
                        output_adv = model(image_adv_var)['out']

            pred_ori = np.expand_dims(output_ori, axis=0)
            pred_adv = output_adv.data.cpu().numpy()
            pred_adv = np.argmax(pred_adv, axis=1)

            evaluator.add_batch(pred_ori, pred_adv)

        try:
            Acc = evaluator.Pixel_Accuracy()
            Acc_class = evaluator.Pixel_Accuracy_Class()
            mIoU = evaluator.Mean_Intersection_over_Union()
            FWIoU = evaluator.Frequency_Weighted_Intersection_over_Union()
        except Exception:
            Acc = 0.
            Acc_class = 0.
            mIoU = 0.
            FWIoU = 0.

        result_str = 'Acc : {0:.04f}, Acc_class : {1:.04f}, mIoU : {2:.04f}, FWIoU : {3:.04f}'.format(
            Acc, Acc_class, mIoU, FWIoU)
        print(curt_folder, ' : ', result_str)
        result_dict[curt_folder] = result_str

        with open(
                'temp_seg_results_gt_{0}_{1}.json'.format(
                    args.test_model, args.dataset_type), 'w') as fout:
            json.dump(result_dict, fout, indent=2)
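
For context, a minimal sketch of the confusion-matrix Evaluator used above, in the style of common segmentation toolkits (a hypothetical reimplementation; the repo ships its own, with the extra accuracy metrics omitted here):

import numpy as np

class EvaluatorSketch:
    def __init__(self, num_class):
        self.num_class = num_class
        self.confusion_matrix = np.zeros((num_class, num_class))

    def reset(self):
        self.confusion_matrix[:] = 0

    def add_batch(self, gt_image, pre_image):
        # accumulate a num_class x num_class confusion matrix over the batch
        mask = (gt_image >= 0) & (gt_image < self.num_class)
        label = self.num_class * gt_image[mask].astype(int) + pre_image[mask]
        counts = np.bincount(label, minlength=self.num_class ** 2)
        self.confusion_matrix += counts.reshape(self.num_class, self.num_class)

    def Mean_Intersection_over_Union(self):
        cm = self.confusion_matrix
        iou = np.diag(cm) / (cm.sum(axis=1) + cm.sum(axis=0) - np.diag(cm))
        return np.nanmean(iou)  # ignores classes that never appear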
Example #5
def main(args=None):
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)
    args_dic = vars(args)

    with open('utils/labels.txt', 'r') as inf:
        # labels.txt holds a dict literal; ast.literal_eval parses it safely
        # (requires `import ast` alongside the other imports)
        args_dic['imagenet_dict'] = ast.literal_eval(inf.read())

    input_dir = os.path.join(args.dataset_dir, 'ori')

    if args.test_model == 'fasterrcnn':
        test_model = torchvision.models.detection.fasterrcnn_resnet50_fpn(
            pretrained=True).cuda().eval()
        img_size = (416, 416)
    elif args.test_model == 'maskrcnn':
        test_model = torchvision.models.detection.maskrcnn_resnet50_fpn(
            pretrained=True).cuda().eval()
        img_size = (416, 416)
    elif args.test_model == 'keypointrcnn':
        test_model = torchvision.models.detection.keypointrcnn_resnet50_fpn(
            pretrained=True).cuda().eval()
        img_size = (416, 416)
    else:
        raise ValueError('Invalid test_model {0}'.format(args.test_model))

    test_folders = []
    for temp_folder in os.listdir(args.dataset_dir):
        if not os.path.isdir(os.path.join(args.dataset_dir, temp_folder)):
            continue
        if temp_folder in ('imagenet_val_5000', 'ori', '.git', '_annotations', '_segmentations'):
            continue
        if len(PICK_LIST) != 0 and temp_folder not in PICK_LIST:
            continue
        if len(BAN_LIST) != 0 and temp_folder in BAN_LIST:
            continue
        test_folders.append(temp_folder)

    result_dict = {}
    for curt_folder in tqdm(test_folders):
        print('Folder : {0}'.format(curt_folder))

        currentDT = datetime.datetime.now()
        result_dir = 'temp_dect_results_{0}_{1}'.format(
            currentDT.strftime("%Y_%m_%d_%H_%M_%S"), currentDT.microsecond)
        if os.path.exists(result_dir):
            raise FileExistsError(
                'Result folder {0} already exists.'.format(result_dir))
        os.mkdir(result_dir)
        os.mkdir(os.path.join(result_dir, 'gt'))
        os.mkdir(os.path.join(result_dir, 'pd'))
        is_missing = False
        for image_name in tqdm(os.listdir(input_dir)):
            temp_image_name_noext = os.path.splitext(image_name)[0]
            ori_img_path = os.path.join(input_dir, image_name)
            adv_img_path = os.path.join(args.dataset_dir, curt_folder,
                                        image_name)
            adv_img_path = os.path.splitext(adv_img_path)[0] + '.png'
            if not os.path.exists(adv_img_path):
                # warn once per folder, but always skip missing files
                if not is_missing:
                    is_missing = True
                    print('File {0} not found.'.format(image_name))
                continue

            image_ori_np = load_image(data_format='channels_first',
                                      shape=img_size,
                                      bounds=(0, 1),
                                      abs_path=True,
                                      fpath=ori_img_path)
            Image.fromarray(
                np.transpose(image_ori_np * 255.,
                             (1, 2, 0)).astype(np.uint8)).save(
                                 os.path.join(result_dir, 'temp_ori.png'))
            image_ori_var = numpy_to_variable(image_ori_np)
            with torch.no_grad():
                gt_out = test_model(image_ori_var)
            gt_out = convert_torch_det_output(gt_out, cs_th=0.3)[0]

            image_adv_np = load_image(data_format='channels_first',
                                      shape=img_size,
                                      bounds=(0, 1),
                                      abs_path=True,
                                      fpath=adv_img_path)
            Image.fromarray(
                np.transpose(image_adv_np * 255.,
                             (1, 2, 0)).astype(np.uint8)).save(
                                 os.path.join(result_dir, 'temp_adv.png'))
            image_adv_var = numpy_to_variable(image_adv_np)
            with torch.no_grad():
                pd_out = test_model(image_adv_var)
            pd_out = convert_torch_det_output(pd_out, cs_th=0.3)[0]

            save_detection_to_file(
                gt_out,
                os.path.join(result_dir, 'gt', temp_image_name_noext + '.txt'),
                'ground_truth')
            save_detection_to_file(
                pd_out,
                os.path.join(result_dir, 'pd', temp_image_name_noext + '.txt'),
                'detection')

            save_bbox_img(os.path.join(result_dir, 'temp_ori.png'),
                          gt_out['boxes'] if gt_out else [],
                          out_file=os.path.join(result_dir,
                                                'temp_ori_box.png'))
            save_bbox_img(os.path.join(result_dir, 'temp_adv.png'),
                          pd_out['boxes'] if pd_out else [],
                          out_file=os.path.join(result_dir,
                                                'temp_adv_box.png'))

        mAP_score = calculate_mAP_from_files(os.path.join(result_dir, 'gt'),
                                             os.path.join(result_dir, 'pd'))
        shutil.rmtree(result_dir)
        print(curt_folder, ' : ', mAP_score)
        result_dict[curt_folder] = 'mAP: {0:.04f}'.format(mAP_score)

        with open('temp_det_results_{0}.json'.format(args.test_model),
                  'w') as fout:
            json.dump(result_dict, fout, indent=2)
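
A sketch of what convert_torch_det_output presumably does, given its use above: torchvision detection models return one dict per image with 'boxes', 'labels', and 'scores' tensors, and the helper keeps only detections above the confidence threshold cs_th (hypothetical reimplementation, including the output key names):

def convert_torch_det_output_sketch(det_out, cs_th=0.3):
    results = []
    for det in det_out:  # one dict per input image
        keep = det['scores'] > cs_th
        results.append({
            'boxes': det['boxes'][keep].cpu().numpy(),
            'classes': det['labels'][keep].cpu().numpy(),
            'scores': det['scores'][keep].cpu().numpy(),
        })
    return results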
Example #6
def main(args=None):
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)
    args_dic = vars(args)

    with open('utils/labels.txt', 'r') as inf:
        # parse the dict literal safely (requires `import ast`)
        args_dic['imagenet_dict'] = ast.literal_eval(inf.read())

    input_dir = os.path.join(args.dataset_dir, 'ori')

    if args.test_model == 'resnet50':
        test_model = torchvision.models.resnet50(pretrained=True).cuda()
        test_model.eval()
    else:
        raise ValueError('Invalid test_model {0}'.format(args.test_model))

    test_folders = []
    for temp_folder in os.listdir(args.dataset_dir):
        if not os.path.isdir(os.path.join(args.dataset_dir, temp_folder)):
            continue
        if temp_folder in ('imagenet_val_5000', 'ori', '.git'):
            continue
        if len(PICK_LIST) != 0 and temp_folder not in PICK_LIST:
            continue
        if len(BAN_LIST) != 0 and temp_folder in BAN_LIST:
            continue
        test_folders.append(temp_folder)

    result_dict = {}
    for curt_folder in tqdm(test_folders):
        print('Folder : {0}'.format(curt_folder))
        correct_count = 0
        total_count = 0
        for image_name in tqdm(os.listdir(input_dir)):
            image_ori_path = os.path.join(input_dir, image_name)
            image_adv_path = os.path.join(args.dataset_dir, curt_folder,
                                          image_name)
            image_adv_path = os.path.splitext(image_adv_path)[0] + '.png'

            image_ori_np = load_image(data_format='channels_first',
                                      abs_path=True,
                                      fpath=image_ori_path)
            image_adv_np = load_image(data_format='channels_first',
                                      abs_path=True,
                                      fpath=image_adv_path)
            image_ori_var = numpy_to_variable(image_ori_np)
            image_adv_var = numpy_to_variable(image_adv_np)

            with torch.no_grad():
                logits_ori = test_model(image_ori_var)
                logits_adv = test_model(image_adv_var)

            y_ori_var = logits_ori.argmax()
            y_adv_var = logits_adv.argmax()

            total_count += 1
            if y_ori_var == y_adv_var:
                correct_count += 1
        print('{0} of {1} samples are correctly labeled.'.format(
            correct_count, total_count))
        acc = float(correct_count) / float(total_count)
        print('Accuracy for {0} : {1}'.format(curt_folder, acc))
        result_dict[curt_folder] = str(acc)

    with open('temp_cls_results_{0}.json'.format(args.test_model),
              'w') as fout:
        json.dump(result_dict, fout, indent=2)
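
# Note: the "accuracy" reported above is top-1 label agreement between the
# clean and the adversarial image, so the attack success rate under this
# protocol is simply 1 - accuracy.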
Example #7
def main(args=None):
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)
    args_dic = vars(args)

    with open('utils/labels.txt', 'r') as inf:
        # parse the dict literal safely (requires `import ast`)
        args_dic['imagenet_dict'] = ast.literal_eval(inf.read())

    args_dic['input_dir'] = os.path.join(args.dataset_dir, 'ori')

    target_model = None
    internal = None
    attack = None
    attack_layer_idx = None
    if args.adv_method == 'dr':
        loss_mtd = args.loss_method
        if args.target_model == 'vgg16':
            assert args.vgg16_attacklayer != -1
            target_model = Vgg16()
            internal = list(range(29))
            attack_layer_idx = [args.vgg16_attacklayer]  # 12, 14
            args_dic['image_size'] = (224, 224)
        elif args.target_model == 'resnet152':
            assert args.res152_attacklayer != -1
            target_model = Resnet152()
            internal = list(range(9))
            attack_layer_idx = [args.res152_attacklayer]  # #[4, 5, 6, 7]
            args_dic['image_size'] = (224, 224)
        elif args.target_model == 'inception_v3':
            assert args.inc3_attacklayer != -1
            target_model = Inception_v3()
            internal = list(range(14))
            attack_layer_idx = [args.inc3_attacklayer]  # [3, 4, 7, 8, 12]
            args_dic['image_size'] = (299, 299)
        else:
            raise ValueError(
                'Invalid target_model {0}.'.format(args.target_model))

        attack = DispersionAttack_gpu(target_model,
                                      epsilon=args.epsilon / 255.,
                                      step_size=args.step_size / 255.,
                                      steps=args.steps,
                                      loss_mtd=loss_mtd)

    elif args.adv_method in ('tidim', 'dim', 'mifgsm', 'pgd'):
        attack_layer_idx = [0]
        internal = [0]
        loss_mtd = ''

        if args.target_model == 'vgg16':
            target_model = torchvision.models.vgg16(
                pretrained=True).cuda().eval()
            args_dic['image_size'] = (224, 224)
        elif args.target_model == 'resnet152':
            target_model = torchvision.models.resnet152(
                pretrained=True).cuda().eval()
            args_dic['image_size'] = (224, 224)
        elif args.target_model == 'inception_v3':
            target_model = torchvision.models.inception_v3(
                pretrained=True).cuda().eval()
            args_dic['image_size'] = (299, 299)
        else:
            raise ValueError(
                'Invalid target_model {0}.'.format(args.target_model))

        if args.adv_method == 'dim':
            attack = DIM_Attack(target_model,
                                decay_factor=1,
                                prob=0.5,
                                epsilon=args.epsilon / 255.,
                                step_size=args.step_size / 255.,
                                steps=args.steps,
                                image_resize=330)
        elif args.adv_method == 'mifgsm':
            attack = MomentumIteratorAttack(target_model,
                                            decay_factor=0.5,
                                            epsilon=args.epsilon / 255.,
                                            step_size=args.step_size / 255.,
                                            steps=args.steps,
                                            random_start=False)
        elif args.adv_method == 'pgd':
            attack = LinfPGDAttack(target_model,
                                   epsilon=args.epsilon / 255.,
                                   a=args.step_size / 255.,
                                   k=args.steps,
                                   random_start=False)
        elif args.adv_method == 'tidim':
            attack = TIDIM_Attack(target_model,
                                  decay_factor=1,
                                  prob=0.5,
                                  epsilon=args.epsilon / 255.,
                                  step_size=args.step_size / 255.,
                                  steps=args.steps,
                                  image_resize=330)

    else:
        raise ValueError('Invalid adv_method {0}.'.format(args.adv_method))
    assert (target_model is not None and internal is not None
            and attack is not None and attack_layer_idx is not None)
    attack_layer_idx_str = '_'.join(str(idx) for idx in attack_layer_idx)

    if not DEBUG:
        args_dic['output_dir'] = os.path.join(
            args.dataset_dir,
            '{0}_{1}_layerAt_{2}_eps_{3}_stepsize_{4}_steps_{5}_lossmtd_{6}'.
            format(args.adv_method, args.target_model, attack_layer_idx_str,
                   args.epsilon, args.step_size, args.steps, loss_mtd))

        if os.path.exists(args.output_dir):
            raise ValueError(
                'Output folder {0} already exists.'.format(args.output_dir))
        os.mkdir(args.output_dir)

    # Accumulate images until a full batch (or the final partial batch) is
    # ready, then attack the whole batch at once.
    count = 0
    images_list = []
    names_list = []
    total_images = len(os.listdir(args.input_dir))
    assert args.batch_size > 0
    for image_count, image_name in enumerate(tqdm(os.listdir(args.input_dir))):
        image_path = os.path.join(args.input_dir, image_name)
        image_np = load_image(shape=args.image_size,
                              data_format='channels_first',
                              abs_path=True,
                              fpath=image_path)
        images_list.append(image_np)
        names_list.append(image_name)
        count += 1
        if count < args.batch_size and image_count != total_images - 1:
            continue

        images_np = np.array(images_list)
        count = 0
        images_list = []

        images_var = numpy_to_variable(images_np)
        if args.adv_method == 'dr':
            advs = attack(images_var, attack_layer_idx, internal)
        else:
            assert args.batch_size == 1, 'Baselines are not tested for batch input.'
            target_model.eval()
            logits_nat = target_model(images_var)
            y_var = logits_nat.argmax().long().unsqueeze(0)
            advs = attack(images_var.cpu(), y_var.cpu())

        if not DEBUG:
            advs_np = variable_to_numpy(advs)
            for idx, adv_np in enumerate(advs_np):
                image_pil = Image.fromarray(
                    np.transpose((adv_np * 255).astype(np.uint8), (1, 2, 0)))
                image_pil.save(
                    os.path.join(args.output_dir,
                                 os.path.splitext(names_list[idx])[0] +
                                 '.png'))
        names_list = []
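
A hypothetical invocation, with flag names inferred from the attributes read off args above (the script name is a placeholder and the real parse_args may spell its flags differently):

# python generate_advs.py --adv_method dr --target_model vgg16 \
#     --vgg16_attacklayer 14 --epsilon 16 --step_size 1 --steps 200 \
#     --batch_size 4 --loss_method std --dataset_dir /path/to/dataset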
Example #8
# Assumes dataset_dir_ori and dataset_dir_adv are defined earlier in the
# original script (paths to the clean and the adversarial image folders).
images_name = os.listdir(dataset_dir_ori)

test_model = torchvision.models.densenet121(
    pretrained=True).cuda().eval()

for idx, temp_image_name in enumerate(tqdm(images_name)):
    ori_img_path = os.path.join(dataset_dir_ori, temp_image_name)
    adv_img_path = os.path.join(dataset_dir_adv, temp_image_name)

    image_ori_np = load_image(data_format='channels_first',
                              shape=(224, 224),
                              bounds=(0, 1),
                              abs_path=True,
                              fpath=ori_img_path)
    image_ori_var = numpy_to_variable(image_ori_np)
    with torch.no_grad():
        gt_out = test_model(image_ori_var).detach().cpu().numpy()
    gt_label = np.argmax(gt_out)

    image_adv_np = load_image(data_format='channels_first',
                              shape=(224, 224),
                              bounds=(0, 1),
                              abs_path=True,
                              fpath=adv_img_path)
    image_adv_var = numpy_to_variable(image_adv_np)
    with torch.no_grad():
        pd_out = test_model(image_adv_var).detach().cpu().numpy()
    pd_label = np.argmax(pd_out)

    print('idx: ', idx)
    print('label on clean image (used as ground truth): ', gt_label)
    print('label on adversarial image: ', pd_label)
Example #9
def main(args=None):
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)
    args_dic = vars(args)

    if args.dataset_type == 'voc':
        gt_dir = os.path.join(args.dataset_dir, '_annotations')
    elif args.dataset_type == 'coco':
        gt_loader = COCO(
            os.path.join(args.dataset_dir, 'instances_val2017.json'))
    else:
        raise ValueError('Invalid dataset_type {0}.'.format(args.dataset_type))

    if args.test_model == 'fasterrcnn':
        test_model = torchvision.models.detection.fasterrcnn_resnet50_fpn(
            pretrained=True).cuda().eval()
        img_size = (416, 416)
    elif args.test_model == 'maskrcnn':
        test_model = torchvision.models.detection.maskrcnn_resnet50_fpn(
            pretrained=True).cuda().eval()
        img_size = (416, 416)
    elif args.test_model == 'keypointrcnn':
        test_model = torchvision.models.detection.keypointrcnn_resnet50_fpn(
            pretrained=True).cuda().eval()
        img_size = (416, 416)
    else:
        raise ValueError('Invalid test_model {0}'.format(args.test_model))

    test_folders = []
    for temp_folder in os.listdir(args.dataset_dir):
        if not os.path.isdir(os.path.join(args.dataset_dir, temp_folder)):
            continue
        if temp_folder in ('imagenet_val_5000', '.git', '_annotations', '_segmentations'):
            continue
        if len(PICK_LIST) != 0 and temp_folder not in PICK_LIST:
            continue
        if len(BAN_LIST) != 0 and temp_folder in BAN_LIST:
            continue
        test_folders.append(temp_folder)

    result_dict = {}
    for curt_folder in tqdm(test_folders):
        print('Folder : {0}'.format(curt_folder))
        currentDT = datetime.datetime.now()
        result_dir = 'temp_dect_results_{0}_{1}'.format(
            currentDT.strftime("%Y_%m_%d_%H_%M_%S"), currentDT.microsecond)
        if os.path.exists(result_dir):
            raise FileExistsError(
                'Result folder {0} already exists.'.format(result_dir))
        os.mkdir(result_dir)
        os.mkdir(os.path.join(result_dir, 'gt'))
        os.mkdir(os.path.join(result_dir, 'pd'))
        for adv_name in tqdm(
                os.listdir(os.path.join(args.dataset_dir, curt_folder))):
            temp_image_name_noext = os.path.splitext(adv_name)[0]
            if args.dataset_type == 'voc':
                gt_path = os.path.join(gt_dir, temp_image_name_noext + '.xml')

            if curt_folder == 'ori':
                adv_img_path = os.path.join(args.dataset_dir, curt_folder,
                                            temp_image_name_noext + '.jpg')
            else:
                adv_img_path = os.path.join(args.dataset_dir, curt_folder,
                                            temp_image_name_noext + '.png')

            if not os.path.exists(adv_img_path):
                print('File {0} not found.'.format(adv_name))
                continue

            if args.dataset_type == 'voc':
                gt_out = load_voc_annotations(gt_path, img_size)
                gt_out['classes'] = gt_out['classes'].astype(int)
            elif args.dataset_type == 'coco':
                gt_out = load_coco_annotations(temp_image_name_noext, img_size,
                                               gt_loader)

            if args.test_model == 'keypointrcnn':
                gt_out = only_person(gt_out, args)

            image_adv_np = load_image(data_format='channels_first',
                                      shape=img_size,
                                      bounds=(0, 1),
                                      abs_path=True,
                                      fpath=adv_img_path)
            Image.fromarray(
                np.transpose(image_adv_np * 255.,
                             (1, 2, 0)).astype(np.uint8)).save(
                                 os.path.join(result_dir, 'temp_adv.png'))
            image_adv_var = numpy_to_variable(image_adv_np)
            with torch.no_grad():
                pd_out = test_model(image_adv_var)
            pd_out = convert_torch_det_output(pd_out, cs_th=0.5)[0]

            if args.dataset_type == 'voc':
                pd_out = _transfer_label_to_voc(pd_out, args)

            # reorder each torchvision box from (x1, y1, x2, y2) to
            # (y1, x1, y2, x2) to match the ground-truth layout
            bbox_list = pd_out['boxes']
            for idx, temp_bbox in enumerate(bbox_list):
                pd_out['boxes'][idx] = [
                    temp_bbox[1], temp_bbox[0], temp_bbox[3], temp_bbox[2]
                ]

            save_detection_to_file(
                gt_out,
                os.path.join(result_dir, 'gt', temp_image_name_noext + '.txt'),
                'ground_truth')
            save_detection_to_file(
                pd_out,
                os.path.join(result_dir, 'pd', temp_image_name_noext + '.txt'),
                'detection')

            save_bbox_img(os.path.join(result_dir, 'temp_adv.png'),
                          pd_out['boxes'] if pd_out else [],
                          out_file=os.path.join(result_dir,
                                                'temp_adv_box.png'))

        mAP_score = calculate_mAP_from_files(os.path.join(result_dir, 'gt'),
                                             os.path.join(result_dir, 'pd'))

        shutil.rmtree(result_dir)
        print(curt_folder, ' : ', mAP_score)
        result_dict[curt_folder] = 'mAP: {0:.04f}'.format(mAP_score)

        with open(
                'temp_det_results_gt_{0}_{1}.json'.format(
                    args.test_model, args.dataset_type), 'w') as fout:
            json.dump(result_dict, fout, indent=2)
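
The gt/pd text files written above feed calculate_mAP_from_files; a plausible per-line layout, matching common mAP tooling (an assumption, not confirmed by these snippets):

#   ground_truth files:  <class> <left> <top> <right> <bottom>
#   detection files:     <class> <confidence> <left> <top> <right> <bottom>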
Example #10
def test(args):
    args_dic = vars(args)

    input_dir = os.path.join(args.dataset_dir, 'ori')

    if args.test_model == 'deeplabv3plus':
        args_dic['num_classes'] = 21
        model = DeepLab(num_classes=21,
                        backbone=args.dlv3p_backbone,
                        output_stride=8,
                        sync_bn=False,
                        freeze_bn=True)
        img_size = (513, 513)
        model = model.cuda()
        checkpoint = torch.load(args.pretrained_path)
        model.load_state_dict(checkpoint['state_dict'])
        model.eval()
        #img_transforms = None
        img_mean = [0.485, 0.456, 0.406]
        img_std = [0.229, 0.224, 0.225]
        img_transforms = torchvision.transforms.Compose([
            torchvision.transforms.Resize(img_size),
            torchvision.transforms.ToTensor(),
            torchvision.transforms.Normalize(mean=img_mean, std=img_std)
        ])
    elif args.test_model == 'deeplabv3_resnet101':
        args_dic['num_classes'] = 21
        model = torchvision.models.segmentation.deeplabv3_resnet101(
            pretrained=True, progress=True, num_classes=21)
        model = model.cuda().eval()
        img_size = (1024, 1024)  #(520, 520)
        img_mean = [0.485, 0.456, 0.406]
        img_std = [0.229, 0.224, 0.225]
        img_transforms = torchvision.transforms.Compose([
            torchvision.transforms.Resize(img_size),
            torchvision.transforms.ToTensor(),
            torchvision.transforms.Normalize(mean=img_mean, std=img_std)
        ])
    elif args.test_model == 'fcn_resnet101':
        args_dic['num_classes'] = 21
        model = torchvision.models.segmentation.fcn_resnet101(pretrained=True,
                                                              progress=True,
                                                              num_classes=21)
        model = model.cuda().eval()
        img_size = (1024, 1024)
        img_mean = [0.485, 0.456, 0.406]
        img_std = [0.229, 0.224, 0.225]
        img_transforms = torchvision.transforms.Compose([
            torchvision.transforms.Resize(img_size),
            torchvision.transforms.ToTensor(),
            torchvision.transforms.Normalize(mean=img_mean, std=img_std)
        ])

    else:
        raise ValueError('Invalid test_model {0}.'.format(args.test_model))

    evaluator = Evaluator(args.num_classes)

    test_folders = []
    for temp_folder in os.listdir(args.dataset_dir):
        if not os.path.isdir(os.path.join(args.dataset_dir, temp_folder)):
            continue
        if temp_folder in ('imagenet_val_5000', 'ori', '.git', '_annotations', '_segmentations'):
            continue
        if len(PICK_LIST) != 0 and temp_folder not in PICK_LIST:
            continue
        if len(BAN_LIST) != 0 and temp_folder in BAN_LIST:
            continue
        test_folders.append(temp_folder)

    result_dict = {}
    for curt_folder in tqdm(test_folders):
        print('Folder : {0}'.format(curt_folder))
        evaluator.reset()
        for image_name in tqdm(os.listdir(input_dir)):
            temp_image_name_noext = os.path.splitext(image_name)[0]
            ori_img_path = os.path.join(input_dir, image_name)
            adv_img_path = os.path.join(args.dataset_dir, curt_folder,
                                        image_name)
            adv_img_path = os.path.splitext(adv_img_path)[0] + '.png'
            if img_transforms is None:
                image_ori_np = load_image(data_format='channels_first',
                                          shape=img_size,
                                          bounds=(0, 1),
                                          abs_path=True,
                                          fpath=ori_img_path)
                #Image.fromarray(np.transpose(image_ori_np * 255., (1, 2, 0)).astype(np.uint8)).save('ori.jpg')
                image_ori_var = numpy_to_variable(image_ori_np)
                with torch.no_grad():
                    if args.test_model == 'deeplabv3plus':
                        output_ori = model(image_ori_var)
            else:
                image_ori_var = img_transforms(
                    Image.open(ori_img_path).convert('RGB')).unsqueeze_(0).cuda()
                with torch.no_grad():
                    if args.test_model == 'deeplabv3plus':
                        output_ori = model(image_ori_var)
                    else:
                        output_ori = model(image_ori_var)['out']

            #Image.fromarray((output_ori[0].argmax(axis=0).cpu().numpy().astype(np.float32) / 21. * 255.).astype(np.uint8)).save('ori_fm.jpg')

            if img_transforms is None:
                image_adv_np = load_image(data_format='channels_first',
                                          shape=img_size,
                                          bounds=(0, 1),
                                          abs_path=True,
                                          fpath=adv_img_path)
                #Image.fromarray(np.transpose(image_adv_np * 255., (1, 2, 0)).astype(np.uint8)).save('temp_adv.jpg')
                image_adv_var = numpy_to_variable(image_adv_np)
                with torch.no_grad():
                    if args.test_model == 'deeplabv3plus':
                        output_adv = model(image_adv_var)
            else:
                image_adv_var = img_transforms(
                    Image.open(adv_img_path).convert('RGB')).unsqueeze_(0).cuda()
                with torch.no_grad():
                    if args.test_model == 'deeplabv3plus':
                        output_adv = model(image_adv_var)
                    else:
                        output_adv = model(image_adv_var)['out']

            #Image.fromarray((output_adv[0].argmax(axis=0).cpu().numpy().astype(np.float32) / 21. * 255.).astype(np.uint8)).save('adv_fm.jpg')

            pred_ori = output_ori.data.cpu().numpy()
            pred_ori = np.argmax(pred_ori, axis=1)
            pred_adv = output_adv.data.cpu().numpy()
            pred_adv = np.argmax(pred_adv, axis=1)

            evaluator.add_batch(pred_ori, pred_adv)

        try:
            Acc = evaluator.Pixel_Accuracy()
            Acc_class = evaluator.Pixel_Accuracy_Class()
            mIoU = evaluator.Mean_Intersection_over_Union()
            FWIoU = evaluator.Frequency_Weighted_Intersection_over_Union()
        except Exception:
            Acc = 0.
            Acc_class = 0.
            mIoU = 0.
            FWIoU = 0.

        result_str = 'Acc : {0:.04f}, Acc_class : {1:.04f}, mIoU : {2:.04f}, FWIoU : {3:.04f}'.format(
            Acc, Acc_class, mIoU, FWIoU)
        print(curt_folder, ' : ', result_str)
        result_dict[curt_folder] = result_str

        with open('temp_seg_results_{0}.json'.format(args.test_model),
                  'w') as fout:
            json.dump(result_dict, fout, indent=2)