Example no. 1
def generate_un_ad_sample(img_path, img_label):
    """
    Generate an untargeted adversarial sample for a single image.

    Args:
        img_path (str): path to the input image.
        img_label (int): ground-truth class index.

    Returns:
        str: path of the generated image, e.g. '../generated/adv_image_name'.
    """
    pretrained_model = models.resnet18(pretrained=True)
    # Untargeted FGSM attack with step size (alpha) 0.01
    FGS_untargeted = FastGradientSignUntargeted(pretrained_model, 0.01)
    original_image, prep_img, target_class = get_params(img_path, img_label)

    img_name = img_path.split('/')[-1]
    adv_img_path = FGS_untargeted.generate(
        original_image, target_class, img_name)
    return adv_img_path
def generate_tar_ad_sample(img_path, img_label):
    """
    Generate a targeted adversarial sample for a single image.

    Args:
        img_path (str): path to the input image.
        img_label (int): ground-truth class index.

    Returns:
        str: path of the generated image, e.g. '../generated/adv_image_name'.
    """
    pretrained_model = models.resnet18(pretrained=True)
    # Targeted FGSM attack with step size (alpha) 0.01
    FGS_targeted = FastGradientSignTargeted(pretrained_model, 0.01)
    original_image, prep_img, org_class = get_params(img_path, img_label)
    # Pick a different target class by shifting the original label,
    # wrapping around the 1000 ImageNet classes
    target_class = (org_class + 100) % 1000

    img_name = img_path.split('/')[-1]
    adv_img_path = FGS_targeted.generate(
        original_image, org_class, target_class, img_name)
    return adv_img_path
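`get_params`, `FastGradientSignUntargeted` and `FastGradientSignTargeted` are helpers defined elsewhere in the project. As a rough illustration of the attack they wrap, here is a minimal sketch of an iterative untargeted FGSM step, assuming a preprocessed (1, 3, 224, 224) input tensor and an integer class index; the class name, signature and step count below are hypothetical, not the project's actual API. The targeted variant used in generate_tar_ad_sample differs mainly in sign: it subtracts the gradient sign to descend the loss of the chosen target class rather than ascending the loss of the true class.

import torch
import torch.nn as nn


class FastGradientSignUntargetedSketch:
    """Hypothetical minimal untargeted FGSM attack (iterative variant)."""

    def __init__(self, model, alpha):
        self.model = model.eval()
        self.alpha = alpha                      # per-step perturbation size
        self.criterion = nn.CrossEntropyLoss()

    def generate(self, prep_img, true_class, num_steps=10):
        adv_img = prep_img.clone().detach()
        label = torch.tensor([true_class])
        for _ in range(num_steps):
            adv_img.requires_grad_(True)
            loss = self.criterion(self.model(adv_img), label)
            self.model.zero_grad()
            loss.backward()
            # Untargeted step: move *up* the loss surface of the true class
            adv_img = (adv_img + self.alpha * adv_img.grad.sign()).detach()
        return adv_img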
Example no. 3
    def generate_gradients(self):
        # Forward pass
        model_output = self.model(self.input_image)
        # Zero grads
        self.model.zero_grad()
        # Target for backprop
        one_hot_output = torch.FloatTensor(1, model_output.size()[-1]).zero_()
        one_hot_output[0][self.target_class] = 1
        # Backward pass
        model_output.backward(gradient=one_hot_output)
        # Convert the PyTorch tensor to a numpy array;
        # [0] drops the batch dimension: (1, 3, 224, 224) -> (3, 224, 224)
        gradients_as_arr = self.gradients.data.numpy()[0]
        return gradients_as_arr


if __name__ == '__main__':
    # Get params
    target_example = 0  # Snake
    (original_image, prep_img, target_class, file_name_to_export, pretrained_model) =\
        get_params(target_example)
    # Vanilla backprop
    VBP = VanillaBackprop(pretrained_model, prep_img, target_class)
    # Generate gradients
    vanilla_grads = VBP.generate_gradients()
    # Save colored gradients
    save_gradient_images(vanilla_grads, file_name_to_export + '_Vanilla_BP_color')
    # Convert to grayscale
    grayscale_vanilla_grads = convert_to_grayscale(vanilla_grads)
    # Save grayscale gradients
    save_gradient_images(grayscale_vanilla_grads, file_name_to_export + '_Vanilla_BP_gray')
    print('Vanilla backprop completed')
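`self.gradients`, read in generate_gradients() above, is never assigned in this fragment. In hook-based saliency code it is typically filled by a backward hook registered on the model's first layer; the sketch below shows one way this can look, assuming an AlexNet/VGG-style network with a `features` block (the class name and hook are illustrative, not necessarily this project's exact code).

class VanillaBackpropSketch:
    """Captures the gradient of the class score w.r.t. the input image."""

    def __init__(self, model, input_image, target_class):
        self.model = model.eval()
        self.input_image = input_image          # expected to require gradients
        self.target_class = target_class
        self.gradients = None
        # Backward hook on the first layer: grad_in[0] is the gradient flowing
        # back to the input image. register_backward_hook is the classic API;
        # newer PyTorch versions prefer register_full_backward_hook.
        first_layer = list(self.model.features._modules.values())[0]
        first_layer.register_backward_hook(self.save_input_gradient)

    def save_input_gradient(self, module, grad_in, grad_out):
        self.gradients = grad_in[0]

With such a hook in place, generate_gradients() can read self.gradients immediately after model_output.backward(gradient=one_hot_output).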
                                                                            tv_reg_beta)
            # Sum all to optimize
            loss = euc_loss + reg_alpha + reg_total_variation
            # Step
            loss.backward()
            optimizer.step()
            # Generate image every 5 iterations
            if i % 5 == 0:
                print('Iteration:', str(i), 'Loss:', loss.item())
                x = recreate_image(opt_img)
                cv2.imwrite('../generated/Inv_Image_Layer_' + str(target_layer) +
                            '_Iteration_' + str(i) + '.jpg', x)
            # Reduce learning rate every 40 iterations
            if i % 40 == 0:
                for param_group in optimizer.param_groups:
                    param_group['lr'] *= 1/10


if __name__ == '__main__':
    # Get params
    target_example = 0  # Snake
    (original_image, prep_img, target_class, file_name_to_export, pretrained_model) =\
        get_params(target_example)

    inverted_representation = InvertedRepresentation(pretrained_model)
    image_size = 224  # width & height
    target_layer = 12
    inverted_representation.generate_inverted_image_specific_layer(prep_img,
                                                                   image_size,
                                                                   target_layer)
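The loop above optimizes a regularized inversion objective: a Euclidean term matching the target layer's activations plus an alpha-norm and a total-variation term, in the style of Mahendran & Vedaldi representation inversion. Below is a minimal sketch of a total-variation term taking the tv_reg_beta exponent seen in the truncated call above, assuming a (1, 3, H, W) image tensor; the function name and the tv_reg_lambda weight in the usage comment are illustrative stand-ins for the project's own helpers.

import torch


def total_variation_norm_sketch(input_img, beta):
    """Total-variation regularizer: penalizes differences between
    neighbouring pixels so the inverted image stays smooth."""
    img = input_img[0]                          # drop the batch dimension
    row_diff = img[:, :-1, :] - img[:, 1:, :]   # vertical neighbours
    col_diff = img[:, :, :-1] - img[:, :, 1:]   # horizontal neighbours
    return ((row_diff[:, :, :-1] ** 2 + col_diff[:, :-1, :] ** 2)
            .pow(beta / 2.0).sum())


# Hypothetical usage inside the optimization loop above:
# reg_total_variation = tv_reg_lambda * total_variation_norm_sketch(opt_img, tv_reg_beta)
print(total_variation_norm_sketch(torch.rand(1, 3, 8, 8), 2.0))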
                        default="../dataset/msra/valid/label/im100.png")
    parser.add_argument('--model_path', type=str, default="./runs/debug/")
    parser.add_argument('--fold', type=str, default="unet16")
    parser.add_argument('--model_type', type=str, default="UNet16")

    args = parser.parse_args()
    image_path = args.image_path
    mask_path = args.mask_path
    model_path = str(
        Path(args.model_path).joinpath(
            'model_{fold}.pt'.format(fold=args.fold)))
    model_type = args.model_type

    (original_image, prep_img, prep_mask, file_name_to_export,
     pretrained_model, img_width,
     img_height) = get_params(image_path, mask_path, model_path, model_type)

    # Vanilla backprop
    VBP = VanillaBackprop(pretrained_model)

    # Generate gradients
    vanilla_grads = VBP.generate_gradients(prep_img, prep_mask)
    print(vanilla_grads.shape)

    # Save colored gradients
    # save_gradient_images(vanilla_grads, file_name_to_export + '_Vanilla_BP_color', img_width, img_height)

    # Convert to grayscale
    grayscale_vanilla_grads = convert_to_grayscale(vanilla_grads)

    # Save grayscale gradients
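`convert_to_grayscale`, used both here and in Example no. 3, comes from the project's utility code. Below is a minimal sketch of what such a helper commonly does, assuming a (C, H, W) numpy gradient array; the percentile-based normalization is an assumption, not necessarily the project's exact implementation.

import numpy as np


def convert_to_grayscale_sketch(grads):
    """Collapse a (C, H, W) gradient array into one normalized channel."""
    grayscale = np.sum(np.abs(grads), axis=0)
    # Normalize to [0, 1]; clipping at the 99th percentile keeps a few
    # extreme gradient values from washing out the rest of the map.
    im_max = np.percentile(grayscale, 99)
    im_min = np.min(grayscale)
    grayscale = np.clip((grayscale - im_min) / (im_max - im_min), 0, 1)
    return np.expand_dims(grayscale, axis=0)   # back to (1, H, W)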
Example no. 6
                    '../generated/targeted/adv_img_from_' + image_path + '_' +
                    str(org_class) + '_to_' + str(confirmation_prediction) +
                    '.jpg', recreate_img)

                # Xception model prediction (Fake/Real) on the FGSM-generated image.
                #deepfake_output = self.model(torch.Tensor(recreate_img))
                deepfake_output = self.model(
                    processed_image
                )  #torch.from_numpy(np.flip(recreate_img, axis=0).copy())
                _, FGSM_result = deepfake_output.data.max(1)
                print("FGSM result : ", FGSM_result.numpy()[0])
                break
        return 1


# Would it work to do this untargeted instead of targeted..?!
# This seems quite different from the generation method I had in mind...
# shouldn't an adversarial example (AE) be generated for every image?
if __name__ == '__main__':
    print("------------ Targeted FGSM 실행 ----------")
    folder_pth = 'D:/full_data/test/real'
    target_class = 1  #fake 라 1임.
    for i in range(len(os.listdir(folder_pth))):
        #전체 파일 리스트가 for문 동안 계속 왔다갔다하면 비효율적인데...
        (original_image, prep_img, org_class, _, pretrained_model,
         img_path) = get_params(i)
        FGS_untargeted = FastGradientSignTargeted(pretrained_model, 0.02)
        FGS_untargeted.generate(original_image, org_class, target_class,
                                img_path)

    #FGSM -> model 에 넣어서 결과 출력
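If get_params(i) re-reads the folder on every call, as the comment inside the loop worries, the file list can be built once up front instead. A minimal sketch of that restructuring, assuming the same folder layout; the attack call itself is left as a comment because it is unchanged from the loop above.

import os

folder_pth = 'D:/full_data/test/real'
file_names = sorted(os.listdir(folder_pth))   # listed once, reused every iteration
for i, name in enumerate(file_names):
    img_path = os.path.join(folder_pth, name)
    # ... build prep_img from img_path and run FastGradientSignTargeted.generate
    # exactly as above; index i and file name now stay consistent because the
    # list is fixed for the whole run.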