    def train(self):
        """
        Optimize a patch to generate an adversarial example.
        :return: Nothing
        """

        img_size = 800
        batch_size = 1
        n_t_op_steps = 5000
        max_lab = 14

        ATTACK_TASK = 'target'

        # TARGET_CLASS = 'dog'
        TARGET_CLASS = 16
        # ATTACK_TASK = 'untarget'

        time_str = time.strftime("%Y%m%d-%H%M%S")



        conv_size = 2
        kernel1 = gkern(2*conv_size+1, 3).astype(np.float32)
        stack_kernel1 = np.stack([kernel1, kernel1, kernel1]).swapaxes(2, 0)
        stack_kernel1 = np.expand_dims(stack_kernel1, 3)
        stack_kernel1 = torch.from_numpy(stack_kernel1).permute(2,3,0,1).float()
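
        # stack_kernel1 ends up with shape (3, 1, 2*conv_size+1, 2*conv_size+1):
        # one Gaussian kernel per RGB channel. It is unused in this snippet; a
        # minimal usage sketch, assuming it is meant as a depthwise blur on a
        # (N, 3, H, W) tensor x:
        #   x_blur = F.conv2d(x, stack_kernel1, padding=conv_size, groups=3)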



        # Dataset prepare
        
        data_obj = CocoTrainPerson(dataType='train2017',num_use=100)

        dataloader_obj = DataLoader(data_obj, batch_size=1,shuffle=False)  # load the data with a DataLoader

        # img info prepare
        img_frcn = get_Image_ready(self.Faster_RCNN, '1016.png')
        img_frcn['img_metas'][0][0]['filename'] = None
        img_frcn['img_metas'][0][0]['ori_filename'] = None
        img_frcn['img_metas'][0][0]['ori_shape'] = None
        img_frcn['img_metas'][0][0]['pad_shape'] = None
        img_frcn['img_metas'][0][0]['scale_factor'] = None

        # attack_area_rate = 0.2
        ATTACK_AREA_RATE = 0.1
        decay_t_op_step = 100
        batch_size_sp = 3
        population_num = 300 # 36
        optim_step_num = 300
        k = 0
        for i_batch, batch_data in enumerate(dataloader_obj):
            img, mask, bbox, class_label = batch_data[0][0], batch_data[1][0], batch_data[2][0], batch_data[3][0]
            # img  : 3,500,500
            # mask : 500,500
            # bbox : x1,y1,w,h
            # class_label : tensor[]

            img_name = batch_data[4][0]
            mask_area = torch.sum(mask)
            # if img_name.split('_')[0] != '000000001815':
            #     continue

            print('---------------')
            print(img_name)
            print('---------------')


            
            # use segment SLIC
            base_SLIC_seed_num = 3000
            img_np = img.numpy().transpose(1,2,0)
            mask_np = mask.numpy()
            numSegments = int(base_SLIC_seed_num/(500*500)*torch.sum(mask))
            segments_np = slic(image=img_np, n_segments=numSegments, sigma=0, slic_zero=True, mask=mask_np)
            segments_tensor = torch.from_numpy(segments_np).float().cuda()
            segments_label = torch.unique(segments_tensor)
            segments_label = segments_label[1:]


            # define theta_m
            # pay attention to the center and the boundary

            # (0) prepare stack of sp
            # (1) find the center sp
            # (2) find the boundary sp

            # # (0) prepare stack of sp
            zero_layer = torch.zeros_like(segments_tensor)
            one_layer = torch.ones_like(segments_tensor)
            # segments_stack = torch.stack([torch.where(segments_tensor==segments_label[j], segments_tensor, zero_layer) for j in range(segments_label.shape[0])], dim=0)
            

            
            # # (1) find the center sp
            bbox_x1 = bbox[0]
            bbox_y1 = bbox[1]
            bbox_w = bbox[2]
            bbox_h = bbox[3]

            bbox_x_c = bbox_x1 + bbox_w/2
            bbox_y_c = bbox_y1 + bbox_h/2
            bbox_x_c_int = int(bbox_x_c)
            bbox_y_c_int = int(bbox_y_c)

         

            # 3 load attack region 
            load_patch_dir = '../common_data/NES_search_test_1107/'+img_name.split('_')[0]

            load_patch_list = os.listdir(load_patch_dir)
            load_patch_list.sort()
            wat_num_max = 0
            for i_name in load_patch_list:
                wat_num = int(i_name.split('_')[0])
                if wat_num > wat_num_max:
                    wat_num_max = wat_num
            for i_name in load_patch_list:
                wat_num = int(i_name.split('_')[0])
                if wat_num == wat_num_max:
                    max_name = i_name
                    break
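
            # the two passes above are equivalent to a single expression
            # (a sketch, assuming filenames keep the "<step>_..." pattern):
            #   max_name = max(load_patch_list, key=lambda n: int(n.split('_')[0]))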

            load_patch = os.path.join(load_patch_dir, max_name)

            load_img = Image.open(load_patch).convert('RGB')
            load_img = transforms.ToTensor()(load_img)
            region_mask = 2*load_img - img.cpu()
            region_mask = torch.sum(region_mask,dim=0)/3
            region_mask = torch.where(mask>0, region_mask,torch.zeros_like(region_mask))


            attack_region_tmp_pil = transforms.ToPILImage()(region_mask.cpu())
            attack_region_tmp_pil.save('013k.png')
            # process mask
            region_mask_new = torch.zeros_like(region_mask).cuda()
            for i in range(segments_label.shape[0]):
                sp =  segments_label[i]
                right_color = (torch.where(segments_tensor==sp,region_mask.cuda(),one_layer*(-10))).cpu()
                right_color = torch.mean(right_color[right_color!=-10])
                color_layer = torch.ones_like(segments_tensor).fill_(right_color)
                region_mask_new = torch.where(segments_tensor==sp, color_layer, region_mask_new)      
            region_mask = region_mask_new
            region_mask_unique = torch.unique(region_mask)
            for i in range(region_mask_unique.shape[0]):
                thres = region_mask_unique[i]
                # region_mask_tmp = torch.zeros_like(region_mask)
                region_mask_tmp = torch.where(region_mask>thres, one_layer, zero_layer)
                pixel_num = torch.sum(region_mask_tmp)
                if pixel_num < mask_area * ATTACK_AREA_RATE:
                    break
            attack_region_search_top = region_mask_tmp
            attack_region_search_top = get_conv_envl(attack_region_search_top)

           
            attack_region_tmp = attack_region_search_top.cuda()
            print('---------------')
            print('Attack region covers', float(torch.sum(attack_region_tmp)/mask_area), 'of the mask area.')
            print('---------------')
            # start from gray: sigmoid(0) = 0.5
            adv_patch_w = torch.zeros(3,500,500).cuda()

            adv_patch_w.requires_grad_(True)

            optimizer = optim.Adam([
                {'params': adv_patch_w, 'lr': 0.1}
            ], amsgrad=True)

            t_op_num = 800
            min_max_iou_record = 1
            for t_op_step in range(t_op_num):
                adv_patch = torch.sigmoid(adv_patch_w)
                patched_img = torch.where(attack_region_tmp>0, adv_patch, img.cuda()).unsqueeze(0)
              
                patched_img_255 = patched_img * 255.
                patched_img_rsz = F.interpolate(patched_img_255, (416, 416), mode='bilinear').cuda()
                patched_img_nom_rsz = (patched_img_rsz - self.mean) / self.std

                batch_size_now = patched_img_255.shape[0]

                # output
                img_new = copy.deepcopy(img_frcn)
                img_new['img'][0] = patched_img_nom_rsz
                yolo_output = self.YOLOv3(return_loss=False, rescale=False,  **img_new)
                # output format is [x1,y1,x2,y2]


                # analyze yolo_output [batch_size]
                # [
                # ( multi_lvl_bboxes, multi_lvl_cls_scores, multi_lvl_conf_scores )
                # multi_lvl_bboxes  [ 3 layers ]
                # [ [0]       1875, 4           
                #   [1]       7500, 4           
                #   [2]       30000,4   ]
                #                     
                # multi_lvl_cls_scores                    
                # [ [0]       1875, 80           
                #   [1]       7500, 80          
                #   [2]       30000,80  ]
                #                     
                # multi_lvl_conf_scores                    
                # [ [0]       1875          
                #   [1]       7500          
                #   [2]       30000     ]
                #  * batch_size
                # ]                   

                # merge yolo output
                multi_lvl_bboxes_batch = []  
                multi_lvl_cls_scores_batch = []
                multi_lvl_conf_scores_batch = []

                for i_b in range(batch_size_now):
                    multi_lvl_bboxes_batch += yolo_output[i_b][0]
                    multi_lvl_cls_scores_batch += yolo_output[i_b][1]
                    multi_lvl_conf_scores_batch += yolo_output[i_b][2]

                multi_lvl_bboxes_batch = torch.cat(multi_lvl_bboxes_batch, dim=0) 
                multi_lvl_cls_scores_batch = torch.cat(multi_lvl_cls_scores_batch, dim=0) 
                multi_lvl_conf_scores_batch = torch.cat(multi_lvl_conf_scores_batch, dim=0) 

                # objectness loss
                objectness_loss = torch.sum(multi_lvl_conf_scores_batch[multi_lvl_conf_scores_batch>0.05])

                # class loss
                attack_class_score = multi_lvl_cls_scores_batch[:,class_label]
                # attack_class_score = attack_class_score[attack_class_score>0.5]
                attack_class_score = torch.sort(attack_class_score, descending=True)[0][:30]
                cls_loss = torch.sum(attack_class_score)

                # target class loss
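                # (negating the sum below raises the target-class score of
                # boxes whose objectness exceeds 0.5; boxes already above 0.9
                # are excluded so they stop contributing gradient)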
                attack_class_score_target = multi_lvl_cls_scores_batch[:,16]
                attack_class_score_target = attack_class_score_target[multi_lvl_conf_scores_batch>0.5]
                attack_class_score_target = attack_class_score_target[attack_class_score_target<0.9]
                attack_class_score_target = torch.sort(attack_class_score_target, descending=True)[0][:30]
                cls_target_loss = - torch.sum(attack_class_score_target)




                # iou loss
                bbox_x1 = bbox[0]/500*416
                bbox_y1 = bbox[1]/500*416
                bbox_w = bbox[2]/500*416
                bbox_h = bbox[3]/500*416
                ground_truth_bbox = [bbox_x1, bbox_y1, bbox_x1+bbox_w, bbox_y1 + bbox_h]
                ground_truth_bbox = torch.Tensor(ground_truth_bbox).unsqueeze(0).cuda()
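                # compute_iou_tensor is assumed to broadcast the single
                # ground-truth box against all predicted boxes and return a
                # per-box IoU vector of shape (N,)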
                iou_all = compute_iou_tensor(multi_lvl_bboxes_batch, ground_truth_bbox)
                iou_positive = iou_all[iou_all>0.05]
                iou_loss = torch.sum(iou_all)


                # class loss selected by IoU
                attack_class_score = multi_lvl_cls_scores_batch[:,class_label]
                attack_class_score_iou = attack_class_score[iou_all>0.05]
                attack_class_score_iou_sort = torch.sort(attack_class_score_iou, descending=True)[0][:30]
                cls_iou_loss = torch.sum(attack_class_score_iou_sort)




                # rpn loss
                # shrink every predicted box toward its own center
                rpn_ctx = (multi_lvl_bboxes_batch[:,0] + multi_lvl_bboxes_batch[:,2])/2
                rpn_cty = (multi_lvl_bboxes_batch[:,1] + multi_lvl_bboxes_batch[:,3])/2
                rpn_box = multi_lvl_bboxes_batch[:,:4]
                rpn_ctx = rpn_ctx.unsqueeze(-1)
                rpn_cty = rpn_cty.unsqueeze(-1)
                rpn_box_target = torch.cat([rpn_ctx,rpn_cty,rpn_ctx,rpn_cty], dim=-1)
                rpn_loss = l1_norm(multi_lvl_conf_scores_batch.unsqueeze(-1).repeat(1,4)*(multi_lvl_bboxes_batch - rpn_box_target))
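                # each box's L1 offset from its center is weighted by its
                # objectness score, so confident proposals are shrunk hardest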
                


                
                # total_loss = cls_loss + objectness_loss + rpn_loss + cls_target_loss + cls_iou_loss
                # total_loss =  cls_target_loss*100 + cls_iou_loss*100  #+ rpn_loss
                total_loss =  cls_iou_loss*100  + rpn_loss

                total_loss.backward()
                optimizer.step()
                optimizer.zero_grad()

                



                # ----------------------------------
                # ------------------------
                # early stop
                if t_op_step % 30 == 0:
                    print(  t_op_step,
                                'iou', float(torch.max(iou_all)), 
                                'cls', float(torch.max(attack_class_score)),
                                'obj', float(torch.max(multi_lvl_conf_scores_batch)))

                #test
                patched_img_cpu = patched_img.cpu().squeeze()
                test_confidence_threshold = 0.45


                iou_max = torch.max(iou_all)
                if iou_max < 0.05 or torch.max(multi_lvl_conf_scores_batch) < test_confidence_threshold:
                    print('Break at',t_op_step,'iou final max:', torch.max(iou_all))
                    # save image
                    patched_img_cpu_pil = transforms.ToPILImage()(patched_img_cpu)
                    out_file_path = os.path.join('../common_data/NES_attack/YOLO3/success'+str(int(ATTACK_AREA_RATE*100)), img_name)
                    patched_img_cpu_pil.save(out_file_path)

                    
                    break

                # report 

                
                max_iou = torch.max(iou_all)
                if max_iou < min_max_iou_record:
                    min_max_iou_record = max_iou
                    txt_save_dir =  '../common_data/NES_attack/YOLO3/iou'+str(int(ATTACK_AREA_RATE*100))
                    txt_save_path = os.path.join(txt_save_dir, img_name.split('.')[0]+'.txt')
                    with open(txt_save_path,'w') as f:
                        text = str(float(max_iou))
                        f.write(text)

                if t_op_step % 100 == 0:

                    iou_sort = torch.sort(iou_all,descending=True)[0][:6].detach().clone().cpu()

                    print(t_op_step, 'iou t-cls  :', max_iou)

                    print()
Example #2
    def train(self):
        """
        Optimize a patch to generate an adversarial example.
        :return: Nothing
        """

        img_size = 800
        batch_size = 1
        n_t_op_steps = 5000
        max_lab = 14

        ATTACK_TASK = 'target'

        # TARGET_CLASS = 'dog'
        TARGET_CLASS = 16
        # ATTACK_TASK = 'untarget'

        time_str = time.strftime("%Y%m%d-%H%M%S")

        conv_size = 2
        kernel1 = gkern(2 * conv_size + 1, 3).astype(np.float32)
        stack_kernel1 = np.stack([kernel1, kernel1, kernel1]).swapaxes(2, 0)
        stack_kernel1 = np.expand_dims(stack_kernel1, 3)
        stack_kernel1 = torch.from_numpy(stack_kernel1).permute(2, 3, 0,
                                                                1).float()

        # Dataset prepare

        data_obj = CocoTrainPerson(dataType='train2017', num_use=100)

        dataloader_obj = DataLoader(data_obj, batch_size=1,
                                    shuffle=False)  # load the data with a DataLoader

        # img info prepare
        img_frcn = get_Image_ready(self.Mask_RCNN, '1016.png')
        img_frcn['img_metas'][0][0]['filename'] = None
        img_frcn['img_metas'][0][0]['ori_filename'] = None
        img_frcn['img_metas'][0][0]['ori_shape'] = None
        img_frcn['img_metas'][0][0]['pad_shape'] = None
        img_frcn['img_metas'][0][0]['scale_factor'] = None

        # attack_area_rate = 0.2
        ATTACK_AREA_RATE = 0.0
        decay_t_op_step = 100
        batch_size_sp = 3
        population_num = 300  # 36
        optim_step_num = 300
        k = 0
        for i_batch, batch_data in enumerate(dataloader_obj):
            img, mask, bbox, class_label = batch_data[0][0], batch_data[1][
                0], batch_data[2][0], batch_data[3][0]
            # img  : 3,500,500
            # mask : 500,500
            # bbox : x1,y1,w,h
            # class_label : tensor[]

            img_name = batch_data[4][0]
            mask_area = torch.sum(mask)
            # if img_name.split('_')[0] != '000000001815':
            #     continue

            print('---------------')
            print(img_name)
            print('---------------')

            # use segment SLIC
            base_SLIC_seed_num = 3000
            img_np = img.numpy().transpose(1, 2, 0)
            mask_np = mask.numpy()
            numSegments = int(base_SLIC_seed_num / (500 * 500) *
                              torch.sum(mask))
            segments_np = slic(image=img_np,
                               n_segments=numSegments,
                               sigma=0,
                               slic_zero=True,
                               mask=mask_np)
            segments_tensor = torch.from_numpy(segments_np).float().cuda()
            segments_label = torch.unique(segments_tensor)
            segments_label = segments_label[1:]

            zero_layer = torch.zeros_like(segments_tensor)
            one_layer = torch.ones_like(segments_tensor)
            # segments_stack = torch.stack([torch.where(segments_tensor==segments_label[j], segments_tensor, zero_layer) for j in range(segments_label.shape[0])], dim=0)

            # # (1) find the center sp
            bbox_x1 = bbox[0]
            bbox_y1 = bbox[1]
            bbox_w = bbox[2]
            bbox_h = bbox[3]

            bbox_x_c = bbox_x1 + bbox_w / 2
            bbox_y_c = bbox_y1 + bbox_h / 2
            bbox_x_c_int = int(bbox_x_c)
            bbox_y_c_int = int(bbox_y_c)

            load_patch_dir = '../common_data/NES_search_test_1107/' + img_name.split(
                '_')[0]

            load_patch_list = os.listdir(load_patch_dir)
            load_patch_list.sort()
            wat_num_max = 0
            for i_name in load_patch_list:
                wat_num = int(i_name.split('_')[0])
                if wat_num > wat_num_max:
                    wat_num_max = wat_num
            for i_name in load_patch_list:
                wat_num = int(i_name.split('_')[0])
                if wat_num == wat_num_max:
                    max_name = i_name
                    break

            load_patch = os.path.join(load_patch_dir, max_name)

            load_img = Image.open(load_patch).convert('RGB')
            load_img = transforms.ToTensor()(load_img)
            region_mask = 2 * load_img - img.cpu()
            region_mask = torch.sum(region_mask, dim=0) / 3
            region_mask = torch.where(mask > 0, region_mask,
                                      torch.zeros_like(region_mask))

            attack_region_tmp_pil = transforms.ToPILImage()(region_mask.cpu())
            attack_region_tmp_pil.save('013k.png')
            # process mask
            region_mask_new = torch.zeros_like(region_mask).cuda()
            for i in range(segments_label.shape[0]):
                sp = segments_label[i]
                right_color = (torch.where(segments_tensor == sp,
                                           region_mask.cuda(),
                                           one_layer * (-10))).cpu()
                right_color = torch.mean(right_color[right_color != -10])
                color_layer = torch.ones_like(segments_tensor).fill_(
                    right_color)
                region_mask_new = torch.where(segments_tensor == sp,
                                              color_layer, region_mask_new)
            region_mask = region_mask_new
            region_mask_unique = torch.unique(region_mask)

            ATTACK_AREA_RATE = 0.0
            for enlarge_i in range(50):
                ATTACK_AREA_RATE = ATTACK_AREA_RATE + 0.01

                for i in range(region_mask_unique.shape[0]):
                    thres = region_mask_unique[i]
                    # region_mask_tmp = torch.zeros_like(region_mask)
                    region_mask_tmp = torch.where(region_mask > thres,
                                                  one_layer, zero_layer)
                    pixel_num = torch.sum(region_mask_tmp)
                    if pixel_num < mask_area * ATTACK_AREA_RATE:
                        break

                attack_region_search_top = region_mask_tmp
                attack_region_search_top = get_conv_envl(
                    attack_region_search_top)

                attack_region_tmp = attack_region_search_top.cuda()
                print('---------------')
                print('Attack region covers',
                      float(torch.sum(attack_region_tmp) / mask_area),
                      'of the mask area.')
                print('---------------')
                # start from gray: sigmoid(0) = 0.5
                adv_patch_w = torch.zeros(3, 500, 500).cuda()

                adv_patch_w.requires_grad_(True)

                optimizer = optim.Adam([{
                    'params': adv_patch_w,
                    'lr': 0.1
                }],
                                       amsgrad=True)

                t_op_num = 50
                min_max_iou_record = 1
                for t_op_step in range(t_op_num):
                    adv_patch = torch.sigmoid(adv_patch_w)
                    patched_img = torch.where(attack_region_tmp > 0, adv_patch,
                                              img.cuda()).unsqueeze(0)

                    patched_img_255 = patched_img * 255.
                    patched_img_rsz = F.interpolate(patched_img_255,
                                                    (800, 800),
                                                    mode='bilinear').cuda()
                    patched_img_nom_rsz = (patched_img_rsz -
                                           self.mean) / self.std

                    # output
                    img_new = copy.deepcopy(img_frcn)
                    img_new['img'][0] = patched_img_nom_rsz

                    frcn_output = self.Mask_RCNN(return_loss=False,
                                                 rescale=False,
                                                 **img_new)

                    # compute loss
                    proposals_4507 = frcn_output[1]
                    proposals_score_4507 = frcn_output[2]
                    det_bboxes, det_labels, proposals = frcn_output[0]

                    det_bboxes = torch.cat(det_bboxes, dim=0) / 800 * 500
                    proposals = torch.cat(proposals, dim=0) / 800 * 500
                    det_labels = torch.cat(det_labels, dim=0)
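
                    # layout assumption (mmdet-style class-specific heads):
                    # det_bboxes is (N, num_classes * 4), so class c occupies
                    # columns c*4:(c+1)*4; det_labels, despite its name, holds
                    # (N, num_classes) per-class scores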

                    attack_prob = det_labels[:, class_label]
                    training_confidence_threshold = 0.05
                    ov_thrs_index = torch.where(
                        attack_prob > training_confidence_threshold)[
                            0]  # for certain class
                    pbbox_attack_cls = det_bboxes[:, class_label *
                                                  4:(class_label + 1) * 4]

                    # cls loss
                    attack_class_score = det_labels[:, class_label]
                    top_sort_class_score = torch.sort(attack_class_score,
                                                      descending=True)[0][:20]
                    cls_loss = torch.sum(top_sort_class_score)

                    # iou loss
                    bbox_x1 = bbox[0]
                    bbox_y1 = bbox[1]
                    bbox_w = bbox[2]
                    bbox_h = bbox[3]
                    ground_truth_bbox = [
                        bbox_x1, bbox_y1, bbox_x1 + bbox_w, bbox_y1 + bbox_h
                    ]
                    ground_truth_bbox = torch.Tensor(
                        ground_truth_bbox).unsqueeze(0).cuda()
                    iou_all = compute_iou_tensor(
                        det_bboxes[:, class_label * 4:(class_label + 1) * 4],
                        ground_truth_bbox)
                    iou_positive = iou_all[iou_all > 0.05]
                    iou_loss = torch.sum(iou_all)

                    # class loss selected by IoU
                    attack_class_score = det_labels[:, class_label]
                    attack_class_score_iou = attack_class_score[iou_all > 0.25]
                    attack_class_score_iou_sort = torch.sort(
                        attack_class_score_iou, descending=True)[0][:30]
                    cls_iou_loss = torch.sum(attack_class_score_iou_sort)

                    final_roi = pbbox_attack_cls[
                        ov_thrs_index]  # for certain class
                    final_roi = final_roi[:, :4]
                    final_ctx = (final_roi[:, 0] + final_roi[:, 2]) / 2
                    final_cty = (final_roi[:, 1] + final_roi[:, 3]) / 2
                    final_ctx = final_ctx.unsqueeze(-1)
                    final_cty = final_cty.unsqueeze(-1)
                    final_roi_target = torch.cat(
                        [final_ctx, final_cty, final_ctx, final_cty], dim=-1)
                    reg_loss = 10 * l1_norm(final_roi - final_roi_target
                                            ) / final_roi.shape[0] / 500

                    # RPN loss
                    # r1 : from score
                    # r2 : from x,y,w,h

                    # rpn score target is 0
                    rpn_score = proposals[:, 4]
                    loss_r1 = l2_norm(rpn_score - 0)

                    # rpn box target: shrink each proposal onto its own center
                    rpn_ctx = (proposals[:, 0] + proposals[:, 2]) / 2
                    rpn_cty = (proposals[:, 1] + proposals[:, 3]) / 2
                    rpn_box = proposals[:, :4]
                    rpn_ctx = rpn_ctx.unsqueeze(-1)
                    rpn_cty = rpn_cty.unsqueeze(-1)
                    rpn_box_target = torch.cat(
                        [rpn_ctx, rpn_cty, rpn_ctx, rpn_cty], dim=-1)
                    # loss_r2 = l1_norm(rpn_score.unsqueeze(-1).repeat(1,4)*(rpn_box - rpn_box_target)) / 500
                    loss_r2 = l1_norm((rpn_box - rpn_box_target)) / 500
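
                    # loss_r2 drives each proposal toward a zero-area box at
                    # its own center; dividing by 500 normalizes the L1 offset
                    # by the image size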

                    lambda_balance1 = 0.02
                    # rpn_loss = loss_r1 + lambda_balance1 * loss_r2
                    rpn_loss = lambda_balance1 * loss_r2
                    # rpn_loss = loss_r1

                    # total_loss = cls_loss + cls_iou_loss + reg_loss + rpn_loss
                    total_loss = cls_iou_loss + reg_loss + rpn_loss

                    total_loss.backward()
                    optimizer.step()
                    optimizer.zero_grad()

                    # ----------------------------------
                    # ------------------------
                    # early stop

                    #test
                    patched_img_cpu = patched_img.cpu().squeeze()
                    test_confidence_threshold = 0.5
                    iou_threshold = 0.5

                    ov_test_thrs_index = torch.where(
                        attack_prob > test_confidence_threshold)[0]

                    final_pbbox = det_bboxes[:, class_label *
                                             4:(class_label + 1) * 4]
                    ground_truth_bboxs_final = ground_truth_bbox.repeat(
                        final_pbbox.shape[0], 1)
                    iou = compute_iou_tensor(final_pbbox,
                                             ground_truth_bboxs_final)
                    attack_prob_select_by_iou_ = attack_prob[
                        iou > iou_threshold]
                    attack_prob_select_by_iou_ = attack_prob_select_by_iou_[
                        attack_prob_select_by_iou_ > test_confidence_threshold]

                    # break if attack success

                    # stop if no such class found
                    if attack_prob_select_by_iou_.shape[0] == 0:
                        print('Break at', t_op_step, 'no bbox found')
                        # save image

                        txt_save_dir = '../common_data/NES_attack/disappear/FRCN/area'
                        txt_save_path = os.path.join(
                            txt_save_dir,
                            img_name.split('.')[0] + '.txt')
                        with open(txt_save_path, 'w') as f:
                            text = str(
                                float(
                                    torch.sum(attack_region_tmp).cpu() /
                                    mask_area))
                            f.write(text)

                        patched_img_cpu_pil = transforms.ToPILImage()(
                            patched_img_cpu)
                        out_file_path = os.path.join(
                            '../common_data/NES_attack/disappear/FRCN/img',
                            img_name)
                        patched_img_cpu_pil.save(out_file_path)
                        break
                if attack_prob_select_by_iou_.shape[0] == 0:
                    # stop enlarge
                    break
Example #3
    def train(self):
        """
        Optimize a patch to generate an adversarial example.
        :return: Nothing
        """

        img_size = 800
        batch_size = 1
        n_epochs = 5000
        max_lab = 14

        ATTACK_TASK = 'target'

        # TARGET_CLASS = 'dog'
        TARGET_CLASS = 16
        # ATTACK_TASK = 'untarget'

        time_str = time.strftime("%Y%m%d-%H%M%S")

        conv_size = 2
        kernel1 = gkern(2 * conv_size + 1, 3).astype(np.float32)
        stack_kernel1 = np.stack([kernel1, kernel1, kernel1]).swapaxes(2, 0)
        stack_kernel1 = np.expand_dims(stack_kernel1, 3)
        stack_kernel1 = torch.from_numpy(stack_kernel1).permute(2, 3, 0,
                                                                1).float()

        # Dataset prepare

        data_obj = CocoTrainPerson(dataType='train2017', num_use=500)
        dataloader_obj = DataLoader(data_obj, batch_size=1,
                                    shuffle=False)  # load the data with a DataLoader

        # img info prepare
        img_frcn = get_Image_ready(self.Faster_RCNN.module, '1016.png')
        img_frcn['img_metas'][0][0]['filename'] = None
        img_frcn['img_metas'][0][0]['ori_filename'] = None
        img_frcn['img_metas'][0][0]['ori_shape'] = None
        img_frcn['img_metas'][0][0]['pad_shape'] = None
        img_frcn['img_metas'][0][0]['scale_factor'] = None

        # attack_area_rate = 0.2
        ATTACK_AREA_RATE = 0.1
        evo_step_num = 20
        batch_size_sp = 12
        population_num = 72  # 36
        optim_step_num = 300
        k = 0
        for i_batch, batch_data in enumerate(dataloader_obj):
            img, mask, bbox, class_label = batch_data[0][0], batch_data[1][
                0], batch_data[2][0], batch_data[3][0]
            # img  : 3,500,500
            # mask : 500,500
            # bbox : x1,y1,w,h
            # class_label : tensor[]

            img_name = batch_data[4][0]

            print('---------------')
            print(img_name)
            print('---------------')

            save_dir = os.path.join('../common_data/NES_search_test_1107/',
                                    img_name.split('.')[0])
            if os.path.exists(save_dir):
                continue
            time0 = time.time()

            # use segment SLIC
            base_SLIC_seed_num = 3000
            img_np = img.numpy().transpose(1, 2, 0)
            mask_np = mask.numpy()
            numSegments = int(base_SLIC_seed_num / (500 * 500) *
                              torch.sum(mask))
            segments_np = slic(image=img_np,
                               n_segments=numSegments,
                               sigma=0,
                               slic_zero=True,
                               mask=mask_np)
            segments_tensor = torch.from_numpy(segments_np).float().cuda()
            segments_label = torch.unique(segments_tensor)
            segments_label = segments_label[1:]

            # define theta_m
            # pay attention to the center and the boundary

            # (0) prepare stack of sp
            # (1) find the center sp
            # (2) find the boundary sp

            # (0) prepare stack of sp
            zero_layer = torch.zeros_like(segments_tensor)
            one_layer = torch.ones_like(segments_tensor)
            segments_stack = torch.stack([
                torch.where(segments_tensor == segments_label[j],
                            segments_tensor, zero_layer)
                for j in range(segments_label.shape[0])
            ],
                                         dim=0)
            segments_stack = segments_stack.cpu()

            mask_area = torch.sum(mask)
            # (1) find the center sp
            bbox_x1 = bbox[0]
            bbox_y1 = bbox[1]
            bbox_w = bbox[2]
            bbox_h = bbox[3]

            bbox_x_c = bbox_x1 + bbox_w / 2
            bbox_y_c = bbox_y1 + bbox_h / 2
            bbox_x_c_int = int(bbox_x_c)
            bbox_y_c_int = int(bbox_y_c)

            # if segments_tensor[bbox_y_c_int, bbox_x_c_int] == 0:
            #     # no sp in center
            #     center_sp = torch.Tensor().cuda()
            # else:
            #     center_sp = segments_tensor[bbox_y_c_int, bbox_x_c_int].unsqueeze(0)
            #     center_sp_layer = torch.where(segments_tensor==center_sp, one_layer, zero_layer)
            #     center_sp_layer_np = center_sp_layer.cpu().numpy()
            #     kernel = np.ones((3,3),np.uint8)
            #     center_sp_layer_dilate = cv2.dilate(center_sp_layer_np, kernel, iterations = 2)
            #     center_sp_layer_dilate = torch.from_numpy(center_sp_layer_dilate)
            #     center_sp_layer_dilate_stack = center_sp_layer_dilate.unsqueeze(0).repeat(segments_stack.shape[0],1,1)

            #     cross_stack = center_sp_layer_dilate_stack * segments_stack
            #     cross_ = torch.sum(cross_stack, dim=0)
            #     neighborhoods = torch.unique(cross_)[1:]
            #     center_sps = neighborhoods
            #     cross_stack = cross_stack.cpu()
            #     center_sp_layer_dilate_stack = center_sp_layer_dilate_stack.cpu()

            #     # we also need center sp's neighborhoods

            # # (2) find the boundary sp

            # # boundary_erode
            # kernel = np.ones((3,3),np.uint8)
            # mask_erosion = cv2.erode(mask_np, kernel, iterations = 2)
            # boundary_erode = mask_np - mask_erosion
            # boundary_erode = torch.from_numpy(boundary_erode)
            # # boundary_erode_pil = transforms.ToPILImage()(boundary_erode.cpu())
            # # boundary_erode_pil.show()

            # boundary_erode_stack = boundary_erode.unsqueeze(0).repeat(segments_stack.shape[0],1,1)
            # boundary_mul_segments_stack = boundary_erode_stack * segments_stack
            # boundary_mul_segments = torch.sum(boundary_mul_segments_stack, dim=0)
            # # boundary_mul_segments_pil = transforms.ToPILImage()(boundary_mul_segments.cpu())
            # # boundary_mul_segments_pil.show()
            # boundary_mul_segments_unique = torch.unique(boundary_mul_segments)
            # boundary_mul_segments_unique = boundary_mul_segments_unique[1:]
            # boundary_sp = boundary_mul_segments_unique

            # sandian ("scattered dots")
            # init a regular dot grid: one dot at the center of each
            # densy x densy cell of a 13 x 13 layout
            densy = 7
            unit_w = 13 * densy
            unit_h = 13 * densy
            sandian = torch.zeros(unit_w, unit_h)
            '''
            log:
            10,5,10,5 : 0.04   work! at 700
            10,5,10,6 : 0.0333 work! at 2040
            '''
            sandian = sandian.reshape(13, densy, 13, densy)
            sandian[:, int((densy - 1) / 2), :, int((densy - 1) / 2)] = 1
            sandian = sandian.reshape(unit_w, unit_h)
            sandian = sandian.unsqueeze(0).unsqueeze(0)
            sandian = F.interpolate(sandian, (500, 500),
                                    mode='nearest').squeeze()
            sandian_stack = sandian.unsqueeze(0).repeat(
                segments_stack.shape[0], 1, 1)
            sandian_mul_segments_stack = sandian_stack * segments_stack
            sandian_mul_segments = torch.sum(sandian_mul_segments_stack, dim=0)
            # (debug) visualize the dot/superpixel overlap:
            # sandian_mul_segments_pil = transforms.ToPILImage()(
            #     sandian_mul_segments.cpu())
            # sandian_mul_segments_pil.show()
            sandian_mul_segments_unique = torch.unique(sandian_mul_segments)
            sandian_mul_segments_unique = sandian_mul_segments_unique[1:]
            sandian_sp = sandian_mul_segments_unique

            # pay attention
            # spot_sp = torch.cat((center_sps, boundary_sp), dim=0)
            spot_sp = sandian_sp
            spot_sp = torch.unique(spot_sp).long()

            sandian_stack = sandian_stack.cpu()
            # boundary_erode_stack = boundary_erode_stack.cpu()
            # boundary_mul_segments_stack = boundary_mul_segments_stack.cpu()
            segments_stack = segments_stack.cpu()

            torch.cuda.empty_cache()

            # show_tensor = img.clone().cuda()
            # for i in range(spot_sp.shape[0]):
            #     sp = spot_sp[i]
            #     show_tensor = torch.where(segments_tensor==sp, zero_layer.fill_(1).unsqueeze(0).repeat(3,1,1),show_tensor)

            # show_tensor_pil = transforms.ToPILImage()(show_tensor.cpu())
            # show_tensor_pil.show()

            # generate theta_m
            # for sp id from 1 to 128
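            # g(t) = (tanh(t) + 1) / 2 maps a theta to a selection
            # probability; its inverse is atanh(2p - 1) = 0.5 * log(p / (1 - p)),
            # so the thetas below make g(theta) equal the desired ratio
            # (uniform_ratio everywhere, 1.5x on the spot superpixels)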
            uniform_ratio = torch.Tensor([ATTACK_AREA_RATE])[0]
            higher_ratio = uniform_ratio * 1.5
            uniform_ratio_theta = 1 / 2 * torch.log(uniform_ratio /
                                                    (1 - uniform_ratio))
            higher_ratio_theta = 1 / 2 * torch.log(higher_ratio /
                                                   (1 - higher_ratio))
            theta_m = torch.zeros_like(segments_label).cpu().fill_(
                uniform_ratio_theta)
            theta_m[spot_sp - 1] = higher_ratio_theta

            for evo_step in range(evo_step_num):
                # prepare sp dataset
                g_theta_m = 1 / 2 * (torch.tanh(theta_m) + 1)
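                # SuperPixelGet is assumed to draw data_num binary masks,
                # including superpixel j independently with probability
                # g_theta_m[j]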
                data_sp = SuperPixelGet(segments_label=segments_label,
                                        segments_tensor=segments_tensor,
                                        g_theta_m=g_theta_m,
                                        data_num=population_num)
                dataloader_sp = DataLoader(data_sp,
                                           batch_size=batch_size_sp,
                                           shuffle=False)  # load the data with a DataLoader

                F_value_restore = torch.Tensor()
                select_m_restore = torch.Tensor()

                for j_sp_batch, sp_batch_data in enumerate(dataloader_sp):

                    attack_mask_batch, select_m = sp_batch_data[0].unsqueeze(
                        1), sp_batch_data[1]
                    select_m_restore = torch.cat(
                        (select_m_restore, select_m.cpu()))

                    batch_size_now = attack_mask_batch.shape[0]
                    # start from gray: sigmoid(0) = 0.5
                    adv_patch_w_batch = torch.zeros(batch_size_now, 3, 500,
                                                    500).cuda()

                    adv_patch_w_batch.requires_grad_(True)

                    ## optimizer and scheduler
                    # optimizer = optim.Adam([
                    #     {'params': adv_patch, 'lr': 0.01*255}
                    # ], amsgrad=True)
                    optimizer = optim.Adam([{
                        'params': adv_patch_w_batch,
                        'lr': 0.1
                    }],
                                           amsgrad=True)

                    L_value_step = torch.ones(batch_size_now,
                                              optim_step_num) * 1000
                    for step in tqdm(range(optim_step_num)):

                        # prepare batch data to feed the frcn
                        img_batch = img.cuda().unsqueeze(0).repeat(
                            batch_size_now, 1, 1, 1)
                        adv_patch_batch = torch.sigmoid(adv_patch_w_batch / 2)
                        patched_img_batch = torch.where(
                            attack_mask_batch > 0, adv_patch_batch, img_batch)
                        patched_img_batch_255 = patched_img_batch * 255.
                        patched_img_batch_rsz = F.interpolate(
                            patched_img_batch_255, (800, 800),
                            mode='bilinear').cuda()
                        patched_img_batch_nom_rsz = (patched_img_batch_rsz -
                                                     self.mean) / self.std

                        # output
                        img_new = copy.deepcopy(img_frcn)
                        img_new['img'][0] = patched_img_batch_nom_rsz
                        img_new['img_metas'][0] = [
                            img_new['img_metas'][0][0]
                            for i in range(batch_size_now)
                        ]
                        frcn_output = self.Faster_RCNN(return_loss=False,
                                                       rescale=False,
                                                       **img_new)
                        # output format is [x1,y1,x2,y2]

                        # compute loss
                        proposals_4507 = frcn_output[1]
                        proposals_score_4507 = frcn_output[2]
                        det_bboxes, det_labels, proposals = frcn_output[0]

                        det_bboxes = torch.cat(det_bboxes, dim=0) / 800 * 500
                        proposals = torch.cat(proposals, dim=0) / 800 * 500
                        det_labels = torch.cat(det_labels, dim=0)

                        attack_prob = det_labels[:, class_label]
                        training_confidence_threshold = 0.28
                        ov_thrs_index = torch.where(
                            attack_prob > training_confidence_threshold)[
                                0]  # for certain class
                        pbbox_attack_cls = det_bboxes[:, class_label *
                                                      4:(class_label + 1) * 4]

                        # cls loss
                        attack_class_score = det_labels[:, class_label]
                        top_sort_class_score = torch.sort(
                            attack_class_score, descending=True)[0][:10]
                        cls_loss = torch.sum(top_sort_class_score)

                        # iou loss (det_bboxes were rescaled to the 500x500
                        # frame above, so the raw bbox coordinates are used)
                        bbox_x1 = bbox[0]
                        bbox_y1 = bbox[1]
                        bbox_w = bbox[2]
                        bbox_h = bbox[3]
                        ground_truth_bbox = [
                            bbox_x1, bbox_y1, bbox_x1 + bbox_w,
                            bbox_y1 + bbox_h
                        ]
                        ground_truth_bbox = torch.Tensor(
                            ground_truth_bbox).unsqueeze(0).cuda()
                        iou_all = compute_iou_tensor(
                            det_bboxes[:,
                                       class_label * 4:(class_label + 1) * 4],
                            ground_truth_bbox)
                        iou_positive = iou_all[iou_all > 0.15]
                        iou_loss = torch.sum(iou_all)

                        # class loss selected by IoU
                        attack_class_score = det_labels[:, class_label]
                        attack_class_score_iou = attack_class_score[
                            iou_all > 0.45]
                        attack_class_score_iou_sort = torch.sort(
                            attack_class_score_iou, descending=True)[0][:30]
                        cls_iou_loss = torch.sum(attack_class_score_iou_sort)

                        final_roi = pbbox_attack_cls[
                            ov_thrs_index]  # for certain class
                        final_roi = final_roi[:, :4]
                        final_ctx = (final_roi[:, 0] + final_roi[:, 2]) / 2
                        final_cty = (final_roi[:, 1] + final_roi[:, 3]) / 2
                        final_ctx = final_ctx.unsqueeze(-1)
                        final_cty = final_cty.unsqueeze(-1)
                        final_roi_target = torch.cat(
                            [final_ctx, final_cty, final_ctx, final_cty],
                            dim=-1)
                        reg_loss = 10 * l1_norm(final_roi - final_roi_target
                                                ) / final_roi.shape[0] / 500

                        # RPN loss
                        # r1 : from score
                        # r2 : from x,y,w,h

                        # rpn score target is 0
                        rpn_score = proposals[:, 4]
                        loss_r1 = l2_norm(rpn_score - 0)

                        # rpn box target: shrink each proposal onto its own center
                        rpn_ctx = (proposals[:, 0] + proposals[:, 2]) / 2
                        rpn_cty = (proposals[:, 1] + proposals[:, 3]) / 2
                        rpn_box = proposals[:, :4]
                        rpn_ctx = rpn_ctx.unsqueeze(-1)
                        rpn_cty = rpn_cty.unsqueeze(-1)
                        rpn_box_target = torch.cat(
                            [rpn_ctx, rpn_cty, rpn_ctx, rpn_cty], dim=-1)
                        # loss_r2 = l1_norm(rpn_score.unsqueeze(-1).repeat(1,4)*(rpn_box - rpn_box_target)) / 500
                        loss_r2 = l1_norm((rpn_box - rpn_box_target)) / 500

                        lambda_balance1 = 0.02
                        # rpn_loss = loss_r1 + lambda_balance1 * loss_r2
                        rpn_loss = lambda_balance1 * loss_r2
                        # rpn_loss = loss_r1

                        # total_loss = cls_loss + reg_loss + rpn_loss
                        total_loss = cls_loss + cls_iou_loss + reg_loss + rpn_loss

                        # if epoch > 500:
                        #     total_loss = rpn_loss + cls_loss_new + reg_loss

                        total_loss.backward()
                        optimizer.step()
                        optimizer.zero_grad()

                        # compute F(m,t*;y)

                        # L for the disappearing attack: select predictions
                        # with iou > 0.45 and sum their person-class confidence

                        bbox_x1 = bbox[0]
                        bbox_y1 = bbox[1]
                        bbox_w = bbox[2]
                        bbox_h = bbox[3]
                        ground_truth_bbox = [
                            bbox_x1, bbox_y1, bbox_x1 + bbox_w,
                            bbox_y1 + bbox_h
                        ]
                        ground_truth_bbox = torch.Tensor(
                            ground_truth_bbox).unsqueeze(0).cuda()
                        iou_all = compute_iou_tensor(
                            det_bboxes[:,
                                       class_label * 4:(class_label + 1) * 4],
                            ground_truth_bbox)

                        iou_select_index = iou_all > 0.45

                        for ib in range(batch_size_now):

                            # slice out the ib-th image's block of 1000 detections
                            det_labels_one = det_labels[ib * 1000:(ib + 1) *
                                                        1000, class_label]

                            iou_select_index_one = iou_select_index[
                                ib * 1000:(ib + 1) * 1000]

                            cls_conf_iou_select = det_labels_one[
                                iou_select_index_one]

                            cls_conf_iou_select = cls_conf_iou_select[
                                cls_conf_iou_select > 0.25]

                            cls_conf_iou_select_top = torch.sort(
                                cls_conf_iou_select, descending=True)[0][:10]

                            L_value_step[ib, step] = torch.sum(
                                cls_conf_iou_select_top).detach().clone()
                            pass
                        if (torch.min(L_value_step,
                                      dim=1)[0] == torch.zeros(batch_size_now)
                            ).all():
                            break

                        pass
                    L_value = -torch.min(L_value_step, dim=1)[0]
                    F_value = L_value - torch.sum(
                        torch.sum(sp_batch_data[0], dim=-1),
                        dim=-1).cpu() / mask_area * 10  ###!!!!
                    F_value_restore = torch.cat((F_value_restore, F_value))

                    pass
                # print(F_value_restore)
                # now we have F value
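                # NES log-derivative estimator: for Bernoulli inclusion with
                # p = g(theta) = (tanh(theta) + 1) / 2, one has
                # d/dtheta log p(m | theta) = 2 * (m - g(theta)), so
                # grad J(theta) ~= (1/N) * sum_i F(m_i) * 2 * (m_i - g(theta))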

                delta_J_theta = 1 / population_num * F_value_restore.unsqueeze(
                    1) * 2 * (select_m_restore.cpu().float() - g_theta_m)

                delta_J_theta = torch.sum(delta_J_theta, dim=0)
                theta_m = theta_m + delta_J_theta
                g_theta_m = 1 / 2 * (torch.tanh(theta_m) + 1)

                select_sp_index = torch.sort(
                    theta_m)[1][:int(theta_m.shape[0] * ATTACK_AREA_RATE)]

                attack_region_show = zero_layer.clone().squeeze().cuda()
                flag = torch.zeros_like(segments_label)
                flag[select_sp_index] = 1
                for i in range(segments_label.shape[0]):

                    color = g_theta_m[i]
                    color_layer = torch.zeros_like(segments_tensor).fill_(
                        color)
                    sp = segments_label[i]

                    attack_region_show = torch.where(segments_tensor == sp,
                                                     color_layer,
                                                     attack_region_show)
                attack_region_show = attack_region_show / torch.max(
                    attack_region_show)

                attack_region_show = (attack_region_show + img.cuda()) / 2

                attack_region_show_pil = transforms.ToPILImage()(
                    attack_region_show.cpu())

                save_dir = os.path.join('../common_data/NES_search_test_1107/',
                                        img_name.split('.')[0])
                if not os.path.exists(save_dir):
                    os.makedirs(save_dir)
                save_path = os.path.join(
                    save_dir,
                    str(evo_step) + '_pop_' + str(population_num) + '_' +
                    str(base_SLIC_seed_num) + '.png')
                attack_region_show_pil.save(save_path)

                time1 = time.time()

                time_cost = time1 - time0

                if time_cost > 1800:
                    break

                print(g_theta_m)
        pass