Code Example #1
File: attack.py  Project: liangxun/GraphDroid
def worker(args):
    alg, max_bit, model_file = args
    model = load_pretrain_model(model_file)
    data_loader = load_data_for_adv(
        os.path.join(data_dir, 'baseline_dataset.pkl'))
    print("=============attack algorithm:{} max_bit:{} ======START=========".
          format(alg, max_bit))
    r_codes = adv_attack(model, data_loader, max_bit, alg)
    report = {
        'alg': alg,
        'max_bit': max_bit,
        'model_file': model_file,
        'r_codes': r_codes,
    }
    print("=============attack algorithm:{} max_bit:{} ======END=========".
          format(alg, max_bit))
    return report
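
The packed args tuple suggests worker() is written to be mapped over a process pool. A minimal driver sketch, where the algorithm names and model path are hypothetical placeholders; load_pretrain_model, load_data_for_adv, adv_attack, and the module-level data_dir all come from the project:

# Minimal driver sketch for worker() above. The algorithm names and the
# model file below are hypothetical placeholders, not project values.
from multiprocessing import Pool

if __name__ == '__main__':
    tasks = [(alg, max_bit, 'model.pkl')    # hypothetical model file
             for alg in ('fgsm', 'jsma')    # hypothetical algorithm names
             for max_bit in (10, 20, 40)]
    with Pool(processes=4) as pool:
        reports = pool.map(worker, tasks)   # one report dict per args tuple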
Code Example #2
### From another main function
# -*- coding: UTF-8 -*-
import cv2 as cv
import argparse
import numpy as np
import time
from utils import choose_run_mode, load_pretrain_model, set_video_writer
from Pose.pose_visualizer import TfPoseVisualizer
from Action.recognizer import load_action_premodel, framewise_recognize
parser = argparse.ArgumentParser(description='Action Recognition by OpenPose')
parser.add_argument('--video', default='Escalator/xa_0051.mp4',help='Path to video file.')
args = parser.parse_args()

# Import the models and build the graph
# estimator = load_pretrain_model('VGG_origin')  # returns an estimation model
estimator = load_pretrain_model('mobilenet_thin')  # returns a TfPoseVisualizer class handle and builds the computation graph
# action_classifier = load_action_premodel('Action/Es_all_demo.h5')  # returns the action classification model; the tracker is defined inside
action_classifier = load_action_premodel('Action/framewise_recognition_bobei.h5')  # returns the action classification model; the tracker is defined inside

# Parameter initialization
realtime_fps = '0.0000'
start_time = time.time()
fps_interval = 1
fps_count = 0
run_timer = 0
frame_count = 0

# # Read/write video file (only webcam input has been tested)
# cap = choose_run_mode(args)  # choose the webcam or a local file
# video_writer = set_video_writer(cap, write_fps=int(12))  # parameter init for the video saved locally
# video_1 = cv.VideoWriter('test_out/xn_0007.mp4',
Code Example #3
# -*- coding: UTF-8 -*-
import cv2 as cv
import argparse
import numpy as np
import time
from utils import choose_run_mode, load_pretrain_model, set_video_writer
from Pose.pose_visualizer import TfPoseVisualizer
from Action.recognizer import load_action_premodel, framewise_recognize

parser = argparse.ArgumentParser(description='Action Recognition by OpenPose')
parser.add_argument('--video', help='Path to video file.')
args = parser.parse_args()

# Load the models
estimator = load_pretrain_model('VGG_origin')
action_classifier = load_action_premodel('Action/taekwondo_recognition.h5')

# Parameter initialization
realtime_fps = '0.0000'
start_time = time.time()
fps_interval = 1
fps_count = 0
run_timer = 0
frame_count = 0

# Read and write the video file
cap = choose_run_mode(args)
video_writer = set_video_writer(cap, write_fps=int(7.0))

# Save skeleton data for training to a txt file
# f = open('data\\txt_data\\taekwondoskill2-3-1.txt', 'a+')
Code Example #4
import cv2 as cv
import argparse
import numpy as np
import time
from utils import choose_run_mode, load_pretrain_model, set_video_writer
from Pose.pose_visualizer import TfPoseVisualizer
from Action.recognizer import load_action_premodel, framewise_recognize

import lstm_pred

parser = argparse.ArgumentParser(description='Action Recognition by OpenPose')
parser.add_argument('--video', help='Path to video file.')
args = parser.parse_args()

# Import the models
estimator = load_pretrain_model('mobilenet_thin')
#action_classifier = load_action_premodel('Action/framewise_recognition.h5')
action_classifier = load_action_premodel("LSTM/lstm_model.h5")

# Parameter initialization
#realtime_fps = '0.0000'
realtime_fps = 12
start_time = time.time()
fps_interval = 1
fps_count = 0
run_timer = 0
frame_count = 0

# Read/write video file (only webcam input has been tested)
cap = choose_run_mode(args)
video_writer = set_video_writer(cap, write_fps=int(7.0))
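
Examples #2 through #4 all initialize the same FPS bookkeeping (realtime_fps, fps_interval, fps_count, start_time) but cut off before the measurement loop. A sketch of the usual update pattern, with the project-specific pose estimation and recognition calls elided:

# Sketch of the frame loop these examples build toward; only the FPS
# bookkeeping is shown here, the pose/recognition calls are elided.
while cv.waitKey(1) < 0:
    has_frame, frame = cap.read()
    if not has_frame:
        break
    fps_count += 1
    frame_count += 1
    if (time.time() - start_time) > fps_interval:
        # frames seen during the last interval, formatted like realtime_fps above
        realtime_fps = '%.4f' % (fps_count / (time.time() - start_time))
        fps_count = 0
        start_time = time.time()
    cv.putText(frame, 'FPS: ' + str(realtime_fps), (10, 20),
               cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
    video_writer.write(frame)
video_writer.release()
cap.release()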
Code Example #5
import saliency
import PIL
from deepexplain.tensorflow import DeepExplain
from utils import plot
from collections import OrderedDict
import pickle

args = parse_arguments('')
model_name = args.model_name
img_path = args.img_path
img_label_path = 'imagenet.json'
true_class = args.true_label
adversarial_label = args.adv_label
label_num = args.label_num
lambda_up, lambda_down, lambda_label_loss = args.lambda_up, args.lambda_down, args.lambda_label_loss
sess, graph, img_size, images_pl, logits = load_pretrain_model(model_name,
                                                               is_explain=True)
y_label = tf.placeholder(dtype=tf.int32, shape=())
img_label = load_imagenet_label(img_label_path)

img = PIL.Image.open(img_path)
img = preprocess_img(img, img_size)
#new_img = np.load('big_vgg16_30_0.0001_1000_0.001_0.03_3000.npy') # 258
new_img = np.load('vgg16_60_70_35_45_30_0.0001_800_0.0_0.0_9000.npy')  # 208

batch_img = np.expand_dims(img, 0)
new_batch_img = np.expand_dims(new_img, 0)

true_class = 208
label_logits = logits[0, true_class]
gradient_saliency = saliency.GradientSaliency(graph, sess, label_logits,
                                              images_pl)  # 1951/1874
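
Example #5 stops right after constructing GradientSaliency. Example #11 below consumes the same object, so the continuation presumably mirrors those calls:

# Presumable continuation, mirroring the GetMask / VisualizeImageGrayscale
# usage that appears in example #11 below.
vanilla_mask_3d = gradient_saliency.GetMask(img, feed_dict={y_label: true_class})
vanilla_mask_grayscale = saliency.VisualizeImageGrayscale(vanilla_mask_3d)
new_mask_3d = gradient_saliency.GetMask(new_img, feed_dict={y_label: true_class})
new_mask_grayscale = saliency.VisualizeImageGrayscale(new_mask_3d)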
Code Example #6
def main():

    ######### config  ###########

    configs = config()
    pretrain_vgg16_path = os.path.join(configs.py_dir,
                                       'model/vgg16_from_caffe.pth')

    ########  load training data ########
    ######### imagenet ############
    # The disabled ImageNet pipeline below is kept inside a string literal;
    # the CIFAR-10 pipeline further down is used instead.
    '''
   train_img_dir = os.path.join(configs.data_dir, 'img/train')
   train_label_dir = os.path.join(configs.data_dir,'label/map_clsloc.txt')
   val_img_dir = os.path.join(configs.data_dir, 'img/val')
   val_txt_dir = os.path.join(configs.data_dir, 'img/val.txt')
   val_label_dir = os.path.join(configs.data_dir,'label/ILSVRC2012_validation_ground_truth.txt')

   normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                    std=[0.229, 0.224, 0.225])

   transform = transforms.Compose([
       transforms.RandomSizedCrop(224),
       transforms.RandomHorizontalFlip(),
       transforms.ToTensor(),
       normalize
   ])

   train_data = imgnet.ImagenetDataset(img_path=train_img_dir,label_path=train_label_dir,
                                       file_name_txt_path='',split_flag='train',transform=transform)

   train_loader = Data.DataLoader(train_data,batch_size=configs.batch_size,
                                  shuffle=True, num_workers=2, pin_memory= True)

   val_data = imgnet.ImagenetDataset(img_path=val_img_dir,label_path=val_label_dir,
                                     file_name_txt_path= val_txt_dir, split_flag='valid', transform=transform)

   val_loader = Data.DataLoader(val_data, batch_size=configs.batch_size,
                                shuffle= False, num_workers= 2, pin_memory= True)
    '''

    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2023, 0.1994, 0.2010)),
    ])

    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2023, 0.1994, 0.2010)),
    ])

    trainset = torchvision.datasets.CIFAR10(root='./data',
                                            train=True,
                                            download=True,
                                            transform=transform_train)
    train_loader = torch.utils.data.DataLoader(trainset,
                                               batch_size=128,
                                               shuffle=True,
                                               num_workers=2)

    testset = torchvision.datasets.CIFAR10(root='./data',
                                           train=False,
                                           download=True,
                                           transform=transform_test)
    val_loader = torch.utils.data.DataLoader(testset,
                                             batch_size=100,
                                             shuffle=False,
                                             num_workers=2)

    ######### build vgg model ##########

    vgg_cam = models.vgg_cam()
    vgg_pretrain_model = utils.load_pretrain_model(pretrain_vgg16_path)
    vgg_cam.copy_params_from_pretrain_vgg(vgg_pretrain_model,
                                          init_fc8=configs.init_random_fc8)
    vgg_cam = vgg_cam.cuda()

    ########## optim  ###########

    #optimizer = torch.optim.SGD(vgg_cam.parameters(),lr=configs.learning_rate,momentum=configs.momentum)
    optimizer = torch.optim.Adam(vgg_cam.parameters(),
                                 lr=configs.learning_rate,
                                 weight_decay=configs.weight_decay)
    loss_fun = nn.CrossEntropyLoss()

    for epoch in range(20):

        for step, (img_x, label_x) in enumerate(train_loader):

            img, label = Variable(img_x.cuda()), Variable(label_x.cuda())
            predict, _ = vgg_cam(img)
            loss = loss_fun(predict, label)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if (step) % 20 == 0:
                print("Epoch [%d/%d] Loss: %.4f" % (epoch, step, loss.data[0]))

            if (step) % configs.save_ckpoints_iter_number == 0:

                torch.save(
                    vgg_cam,
                    os.path.join(configs.save_ckpt_dir,
                                 'cam' + str(step) + '.pkl'))

            if step % configs.validate_iter_number == 0:

                test(vgg_cam, val_loader, loss_fun)
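
test(vgg_cam, val_loader, loss_fun) is called here but never shown. A minimal sketch of such a validation pass in the same Variable-era PyTorch style; the project's actual helper may differ:

# Minimal sketch of the undefined test() helper: average loss and top-1
# accuracy over the validation loader. The project's version may differ.
def test(model, loader, loss_fun):
    model.eval()
    total_loss, correct, count = 0.0, 0, 0
    for img_x, label_x in loader:
        img, label = Variable(img_x.cuda()), Variable(label_x.cuda())
        predict, _ = model(img)  # vgg_cam returns (class logits, feature maps)
        total_loss += loss_fun(predict, label).data[0]
        correct += (predict.data.max(1)[1] == label.data).sum()
        count += label.size(0)
    model.train()
    acc = correct / float(count)
    print("Val Loss: %.4f Acc: %.4f" % (total_loss / len(loader), acc))
    return acc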
Code Example #7
def main():

    ######### config  ###########

    best_metric = 0
    pretrain_vgg16_path = os.path.join(configs.py_dir,
                                       'model/vgg16_from_caffe.pth')

    ########  load training data ########
    ######### action 40  ############

    normalize = trans.Normalize(mean=[0.4001, 0.4401, 0.4687],
                                std=[0.229, 0.224, 0.225])
    #std=[1, 1, 1])
    train_transform = trans.Compose([
        trans.RandomCrop(224, padding=4),
        trans.RandomHorizontalFlip(),
        trans.ToTensor(),
        normalize,
    ])

    val_transform = trans.Compose([
        trans.Scale((224, 224)),
        trans.ToTensor(),
        normalize,
    ])

    train_data = imgfolder.ImageFolder(os.path.join(configs.data_dir,
                                                    'img/train'),
                                       transform=train_transform)
    train_loader = torch.utils.data.DataLoader(train_data,
                                               batch_size=configs.batch_size,
                                               shuffle=True,
                                               num_workers=4,
                                               pin_memory=True)

    val_data = imgfolder.ImageFolder(os.path.join(configs.data_dir, 'img/val'),
                                     transform=val_transform)
    val_loader = Data.DataLoader(val_data,
                                 batch_size=configs.batch_size,
                                 shuffle=False,
                                 num_workers=4,
                                 pin_memory=True)

    ######### build vgg model ##########

    vgg_cam = models.vgg_cam()
    vgg_pretrain_model = utils.load_pretrain_model(pretrain_vgg16_path)
    vgg_cam.copy_params_from_pretrain_vgg(vgg_pretrain_model,
                                          init_fc8=configs.init_random_fc8)
    vgg_cam = vgg_cam.cuda()

    ########  resume  ###########
    if resume:
        checkpoint = torch.load(
            '/media/cheer/2T/train_pytorch/cam/ckpt/model_best.pth')
        vgg_cam.load_state_dict(checkpoint['state_dict'])
    ########## optim  ###########

    optimizer = torch.optim.SGD(vgg_cam.parameters(),
                                lr=configs.learning_rate,
                                momentum=configs.momentum,
                                weight_decay=configs.weight_decay)
    #optimizer = torch.optim.Adam(vgg_cam.parameters(),lr=configs.learning_rate,weight_decay=configs.weight_decay)
    loss_fun = nn.CrossEntropyLoss()

    for epoch in range(200):

        adjust_learning_rate(optimizer, epoch)
        for step, (img_x, label_x) in enumerate(train_loader):

            img, label = Variable(img_x.cuda()), Variable(label_x.cuda())
            predict, _ = vgg_cam(img)
            loss = loss_fun(predict, label)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if (step) % 20 == 0:
                print("Epoch [%d/%d] Loss: %.4f" % (epoch, step, loss.data[0]))

        current_metric = test(vgg_cam, val_loader, loss_fun)

        if current_metric > best_metric:

            torch.save({'state_dict': vgg_cam.state_dict()},
                       os.path.join(configs.save_ckpt_dir,
                                    'cam' + str(epoch) + '.pth'))

            shutil.copy(
                os.path.join(configs.save_ckpt_dir,
                             'cam' + str(epoch) + '.pth'),
                os.path.join(configs.save_ckpt_dir, 'model_best.pth'))
            best_metric = current_metric

        if epoch % 10 == 0:

            torch.save({'state_dict': vgg_cam.state_dict()},
                       os.path.join(configs.save_ckpt_dir,
                                    'cam' + str(epoch) + '.pth'))
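
adjust_learning_rate(optimizer, epoch) is likewise not shown in this excerpt. A hypothetical sketch using step decay of the base rate from configs; the project's real schedule may differ:

# Hypothetical sketch of adjust_learning_rate(): decay the base learning
# rate by 10x every 30 epochs.
def adjust_learning_rate(optimizer, epoch):
    lr = configs.learning_rate * (0.1 ** (epoch // 30))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr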
Code Example #8
    elif configs.learner.lower() == "rmsprop":
        model.compile(optimizer=optimizers.RMSprop(lr=configs.learning_rate), loss='binary_crossentropy')
    elif configs.learner.lower() == "adam":
        model.compile(optimizer=optimizers.Adam(lr=configs.learning_rate), loss='binary_crossentropy')
    else:
        model.compile(optimizer=optimizers.SGD(lr=configs.learning_rate), loss='binary_crossentropy')

    # -----------Load pretrain model-----------
    if configs.mf_pretrain != '' and configs.mlp_pretrain != '':
        gmf_model = GMF(num_users, num_items, configs.mf_dim)
        gmf_model.build(input_shape=([1, 1]))
        gmf_model.load_weights(configs.mf_pretrain)
        mlp_model = MLP(num_users, num_items, configs.layers, configs.reg_layers)
        mlp_model.build(input_shape=([1, 1]))
        mlp_model.load_weights(configs.mlp_pretrain)
        model = load_pretrain_model(model, gmf_model, mlp_model, len(configs.layers))

    # ---------------Init performance----------------
    (hits, ndcgs) = evaluate_model(model, testRatings, testNegatives, configs.topK)
    hr, ndcg = np.array(hits).mean(), np.array(ndcgs).mean()
    print('Init: HR = %.4f, NDCG = %.4f [%.1f s]' % (hr, ndcg, time() - t1))
    best_hr, best_ndcg, best_iter = hr, ndcg, -1
    if configs.out > 0:
        model.save_weights(model_out_file, overwrite=True)

    # -----------------Training model-------------
    for epoch in range(configs.epochs):
        t1 = time()
        # Generate training instances
        user_input, item_input, labels = get_train_instances(train, configs.num_negatives)
        print('Get_train_instances [%.1f s]' % (time() - t1))
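
get_train_instances pairs each observed interaction with sampled negatives, as in the reference NCF implementation. A sketch, assuming train holds (user, item) interaction keys and num_items is in scope:

# Sketch of get_train_instances() following the reference NCF code: one
# positive per observed (user, item) pair plus num_negatives sampled negatives.
def get_train_instances(train, num_negatives):
    user_input, item_input, labels = [], [], []
    for (u, i) in train.keys():
        user_input.append(u)
        item_input.append(i)
        labels.append(1)  # positive instance
        for _ in range(num_negatives):
            j = np.random.randint(num_items)
            while (u, j) in train:  # resample until the item is unseen by u
                j = np.random.randint(num_items)
            user_input.append(u)
            item_input.append(j)
            labels.append(0)  # sampled negative
    return user_input, item_input, labels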
Code Example #9
# model 4
smoke = load_model('Model/4.model')

# Create the parser object by passing the desired description to ArgumentParser
parser = argparse.ArgumentParser(description='Action Recognition by OpenPose')  # action recognition by OpenPose?

parser.add_argument('--video', help='Path to video file.',
                    default=os.path.basename("C:/Users/haram/PycharmProjects/OpenBankProject/"
                                             "tabaco1.mp4"))

# The parse_args() method parses the arguments given on the command line.
args = parser.parse_args()  # on success, each given argument value is available as args.parameter

# Load the related models
# tensorflow abstraction library
estimator = load_pretrain_model('VGG_origin')  # load the pretrained model (VGG_origin); classification??

# Parameter initialization
realtime_fps = '0.0000'
start_time = time.time()
fps_interval = 1
fps_count = 0
run_timer = 0
frame_count = 0

# Read and write video file (only webcam input has been tested)
cap = choose_run_mode(args)  # assign the capture object from choose_run_mode to cap

while cv.waitKey(1) < 0:  # loop until a key is pressed
    data = []
    has_frame, show = cap.read()  # read the video one frame at a time into has_frame and show; True on success, False on failure
Code Example #10
File: train.py  Project: zbxzc35/pytorch_CAM
def main():

   ######### config  ###########

   configs = config()
   pretrain_vgg16_path = os.path.join(configs.py_dir,'model/vgg16_from_caffe.pth')

   ########  load training data ########
   ######### imagenet ############
   # The disabled ImageNet pipeline below is kept inside a string literal;
   # the CIFAR-10 pipeline further down is used instead.
   '''
   train_img_dir = os.path.join(configs.data_dir, 'img/train')
   train_label_dir = os.path.join(configs.data_dir,'label/map_clsloc.txt')
   val_img_dir = os.path.join(configs.data_dir, 'img/val')
   val_txt_dir = os.path.join(configs.data_dir, 'img/val.txt')
   val_label_dir = os.path.join(configs.data_dir,'label/ILSVRC2012_validation_ground_truth.txt')

   normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                    std=[0.229, 0.224, 0.225])

   transform = transforms.Compose([
       transforms.RandomSizedCrop(224),
       transforms.RandomHorizontalFlip(),
       transforms.ToTensor(),
       normalize
   ])

   train_data = imgnet.ImagenetDataset(img_path=train_img_dir,label_path=train_label_dir,
                                       file_name_txt_path='',split_flag='train',transform=transform)

   train_loader = Data.DataLoader(train_data,batch_size=configs.batch_size,
                                  shuffle=True, num_workers=2, pin_memory= True)

   val_data = imgnet.ImagenetDataset(img_path=val_img_dir,label_path=val_label_dir,
                                     file_name_txt_path= val_txt_dir, split_flag='valid', transform=transform)

   val_loader = Data.DataLoader(val_data, batch_size=configs.batch_size,
                                shuffle= False, num_workers= 2, pin_memory= True)
   '''

   transform_train = transforms.Compose([
       transforms.RandomCrop(32, padding=4),
       transforms.RandomHorizontalFlip(),
       transforms.ToTensor(),
       transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
   ])

   transform_test = transforms.Compose([
       transforms.ToTensor(),
       transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
   ])

   trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform_train)
   train_loader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True, num_workers=2)

   testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_test)
   val_loader = torch.utils.data.DataLoader(testset, batch_size=100, shuffle=False, num_workers=2)

   ######### build vgg model ##########

   vgg_cam = models.vgg_cam()
   vgg_pretrain_model = utils.load_pretrain_model(pretrain_vgg16_path)
   vgg_cam.copy_params_from_pretrain_vgg(vgg_pretrain_model,init_fc8=configs.init_random_fc8)
   vgg_cam = vgg_cam.cuda()

   ########## optim  ###########

   #optimizer = torch.optim.SGD(vgg_cam.parameters(),lr=configs.learning_rate,momentum=configs.momentum)
   optimizer = torch.optim.Adam(vgg_cam.parameters(),lr=configs.learning_rate,weight_decay=configs.weight_decay)
   loss_fun = nn.CrossEntropyLoss()

   for epoch in range(20):

       for step, (img_x,label_x) in enumerate(train_loader):

           img,label = Variable(img_x.cuda()), Variable(label_x.cuda())
           predict, _ = vgg_cam(img)
           loss = loss_fun(predict, label)
           optimizer.zero_grad()
           loss.backward()
           optimizer.step()
           if (step) % 20 == 0:
               print("Epoch [%d/%d] Loss: %.4f" % (epoch, step, loss.data[0]))

           if (step) % configs.save_ckpoints_iter_number == 0:

               torch.save(vgg_cam,os.path.join(configs.save_ckpt_dir, 'cam' + str(step) + '.pkl'))

           if step % configs.validate_iter_number == 0:

               test(vgg_cam,val_loader,loss_fun)
Code Example #11
def main(args):
    for arg in vars(args):
        print(arg, getattr(args, arg))

    model_name = args.model_name
    img_path = args.img_path
    img_label_path = 'imagenet.json'
    true_class = args.true_label
    adversarial_label = args.adv_label
    label_num = args.label_num
    lambda_up, lambda_down, lambda_label_loss = args.lambda_up, args.lambda_down, args.lambda_label_loss

    # model_name = 'inception_v3'
    # img_path = './picture/dog_cat.jpg'
    # img_label_path = 'imagenet.json'
    # true_class = 208
    sess, graph, img_size, images_pl, logits = load_pretrain_model(
        model_name, is_explain=True)
    y_label = tf.placeholder(dtype=tf.int32, shape=())
    label_logits = logits[0, y_label]

    if len(args.imp) > 0:
        img = np.load(args.imp)
        init_epoch = int(args.imp[:-4].split('_')[-1])
        loss_list = list(np.load('loss_' + args.imp))
    else:
        img = PIL.Image.open(img_path)
        img = preprocess_img(img, img_size)
        init_epoch = 0
        loss_list = []

    old_img = np.array(img)
    batch_img = np.expand_dims(img, 0)

    #new_img = np.load('vgg16_30_0.0004_1000_0.001_0.03_4000.npy')
    #new_batch_img = np.concatenate((np.expand_dims(new_img,0),batch_img),axis=0)
    #new_batch_img = np.expand_dims(new_img,0)
    #all_img = np.concatenate((batch_img,new_batch_img))
    imagenet_label = load_imagenet_label(img_label_path)
    prob = tf.nn.softmax(logits)
    _prob = sess.run(prob, feed_dict={images_pl: batch_img})[0]
    #classify(img,_prob,imagenet_label,1,1)

    ####
    #deep explain
    # from deepexplain.tensorflow import DeepExplain
    # label_logits = logits[0,208]
    # with DeepExplain(session=sess) as de:
    #     attributions = {
    #         # Gradient-based
    #         # NOTE: reduce_max is used to select the output unit for the class predicted by the classifier
    #         # For an example of how to use the ground-truth labels instead, see mnist_cnn_keras notebook
    #         'Saliency maps': de.explain('saliency', label_logits, images_pl, batch_img),
    #         'Gradient * Input': de.explain('grad*input', label_logits, images_pl, batch_img),
    #         # 'Integrated Gradients': de.explain('intgrad', label_logits, images_pl, new_batch_img),
    #         'Epsilon-LRP': de.explain('elrp', label_logits, images_pl, batch_img),
    #         'DeepLIFT (Rescale)': de.explain('deeplift', label_logits, images_pl, batch_img),
    #         # Perturbation-based (comment out to evaluate, but this will take a while!)
    #         #'Occlusion [15x15]':    de.explain('occlusion', label_logits, images_pl, batch_img, window_shape=(15,15,3), step=4)
    #     }    ####
    #     new_attributions = {
    #         # Gradient-based
    #         # NOTE: reduce_max is used to select the output unit for the class predicted by the classifier
    #         # For an example of how to use the ground-truth labels instead, see mnist_cnn_keras notebook
    #         'Saliency maps': de.explain('saliency', label_logits, images_pl, new_batch_img),
    #         'Gradient * Input': de.explain('grad*input', label_logits, images_pl, new_batch_img),
    #         # 'Integrated Gradients': de.explain('intgrad', label_logits, images_pl, new_batch_img),
    #         'Epsilon-LRP': de.explain('elrp', label_logits, images_pl, new_batch_img),
    #         'DeepLIFT (Rescale)': de.explain('deeplift', label_logits, images_pl, new_batch_img),
    #         # Perturbation-based (comment out to evaluate, but this will take a while!)
    #         #'Occlusion [15x15]':    de.explain('occlusion', label_logits, images_pl, batch_img, window_shape=(15,15,3), step=4)
    #     }    ####
    #     attributions['Saliency maps'] = np.concatenate((attributions['Saliency maps'],new_attributions['Saliency maps']),axis=0)
    #     attributions['Gradient * Input'] = np.concatenate((attributions['Gradient * Input'],new_attributions['Gradient * Input']),axis=0)
    #     attributions['Epsilon-LRP'] = np.concatenate((attributions['Epsilon-LRP'],new_attributions['Epsilon-LRP']),axis=0)
    #     attributions['DeepLIFT (Rescale)'] = np.concatenate((attributions['DeepLIFT (Rescale)'],new_attributions['DeepLIFT (Rescale)']),axis=0)
    #
    # n_cols = int(len(attributions)) + 1
    # n_rows = 2
    # fig, axes = plt.subplots(nrows=n_rows, ncols=n_cols, figsize=(3 * n_cols, 3 * n_rows))
    #
    # for i, xi in enumerate(all_img):
    #     # xi = (xi - np.min(xi))
    #     # xi /= np.max(xi)
    #     ax = axes.flatten()[i * n_cols]
    #     ax.imshow(xi)
    #     ax.set_title('Original')
    #     ax.axis('off')
    #     for j, a in enumerate(attributions):
    #         axj = axes.flatten()[i * n_cols + j + 1]
    #         plot(attributions[a][i], xi=xi, axis=axj, dilation=.5, percentile=99, alpha=.2).set_title(a)
    ######
    label_logits = logits[0, 208]
    with DeepExplain(session=sess) as de:
        dlift = de.explain('deeplift', label_logits, images_pl, batch_img)

    grad_map_tensor = tf.gradients(label_logits, images_pl)[0]
    grad_map = sess.run(grad_map_tensor,
                        feed_dict={
                            images_pl: np.expand_dims(img, 0),
                            y_label: true_class
                        })

    gradient_saliency = saliency.GradientSaliency(graph, sess, label_logits,
                                                  images_pl)  # 1951/1874
    vanilla_mask_3d = gradient_saliency.GetMask(
        img, feed_dict={y_label: true_class})  # better
    vanilla_mask_grayscale = saliency.VisualizeImageGrayscale(vanilla_mask_3d)

    # smoothgrad_mask_3d = gradient_saliency.GetSmoothedMask(img, feed_dict={y_label:true_class}) # much clear, 2204/2192
    # smoothgrad_mask_grayscale = saliency.VisualizeImageGrayscale(smoothgrad_mask_3d)

    #
    # new_img = np.load('vgg16_60_70_35_45_30_0.0001_800_0.0_0.0_9000.npy')
    # new_grad_map = sess.run(grad_map_tensor,feed_dict={images_pl:np.expand_dims(new_img,0),y_label:true_class})
    # new_vanilla_mask_3d = gradient_saliency.GetMask(new_img, feed_dict={y_label:true_class}) # better
    # new_vanilla_mask_grayscale = saliency.VisualizeImageGrayscale(new_vanilla_mask_3d)
    # new_smoothgrad_mask_3d = gradient_saliency.GetSmoothedMask(new_img, feed_dict={y_label:true_class}) # much clear, 2204/2192
    # new_smoothgrad_mask_grayscale = saliency.VisualizeImageGrayscale(new_smoothgrad_mask_3d)

    #to_dec_center = (60,70)
    to_dec_center = (100, 65)
    #to_dec_radius = (35,45)
    to_dec_radius = (80, 60)
    to_inc_center = (120, 170)
    to_inc_radius = (40, 30)
    _map = vanilla_mask_grayscale
    print(calculate_region_importance(_map, to_dec_center, to_dec_radius))
    print(calculate_region_importance(_map, to_inc_center, to_inc_radius))

    # construct to_inc_region and to_dec_region
    to_dec_region = calculate_img_region_importance(grad_map_tensor,
                                                    to_dec_center,
                                                    to_dec_radius)
    to_inc_region = calculate_img_region_importance(grad_map_tensor,
                                                    to_inc_center,
                                                    to_inc_radius)

    # try NES (Natural evolutionary strategies)
    N = args.N
    sigma = args.sigma
    epsilon = round(args.eps, 2)
    epoch = args.epoch
    eta = args.lr
    #loss = to_dec_region/to_inc_region
    #old_loss = sess.run(loss,feed_dict={images_pl: np.expand_dims(img, 0), y_label: true_class})
    old_loss = calculate_deeplift_loss(dlift, to_dec_center, to_dec_radius,
                                       to_inc_center, to_inc_radius)
    num_list = '_'.join([
        'big', model_name,
        str(N),
        str(eta),
        str(epoch),
        str(sigma),
        str(epsilon)
    ])
    print(num_list)
    for i in range(epoch):
        delta = np.random.randn(int(N / 2), img_size * img_size * 3)
        delta = np.concatenate((delta, -delta), axis=0)
        grad_sum = 0
        f_value_list = []
        for idelta in delta:
            img_plus = np.clip(
                img + sigma * idelta.reshape(img_size, img_size, 3), 0, 1)
            #f_value = sess.run(loss,feed_dict={images_pl:np.expand_dims(img_plus,0),y_label:true_class})
            with DeepExplain(session=sess) as de:
                dlift = de.explain('deeplift', label_logits, images_pl,
                                   np.expand_dims(img_plus, 0))
            f_value = calculate_deeplift_loss(dlift, to_dec_center,
                                              to_dec_radius, to_inc_center,
                                              to_inc_radius)
            f_value_list.append(f_value)
            grad_sum += f_value * idelta.reshape(img_size, img_size, 3)
        grad_sum = grad_sum / (N * sigma)
        new_img = np.clip(
            np.clip(img - eta * grad_sum, old_img - epsilon,
                    old_img + epsilon), 0, 1)
        #new_loss, new_logits = sess.run([loss, logits],
        #                                feed_dict={images_pl: np.expand_dims(new_img, 0), y_label: true_class})
        with DeepExplain(session=sess) as de:
            dlift = de.explain('deeplift', label_logits, images_pl,
                               np.expand_dims(new_img, 0))
        new_loss = calculate_deeplift_loss(dlift, to_dec_center, to_dec_radius,
                                           to_inc_center, to_inc_radius)

        loss_list.append(new_loss)
        print("epoch:{} new:{}, old:{}, {}".format(i, new_loss, old_loss,
                                                   np.argmax(_prob)))
        sys.stdout.flush()
        img = np.array(new_img)
        if i % args.image_interval == 0:
            temp_name = num_list + '_' + str(i + init_epoch)
            np.save(temp_name, new_img)
            np.save('loss_' + temp_name, loss_list)
    np.save(num_list + '_' + str(epoch + init_epoch), new_img)
    np.save('loss_' + num_list + '_' + str(epoch + init_epoch), loss_list)
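
The inner loop above is an antithetic-sampling NES estimate of the gradient, g ≈ (1 / (N·σ)) · Σ_i f(x + σ·δ_i) · δ_i. The same logic as a standalone sketch (the example inlines it with an extra reshape of the flat noise):

# Antithetic NES gradient estimate, distilled from the loop above.
def nes_gradient(f, x, n, sigma):
    delta = np.random.randn(n // 2, *x.shape)
    delta = np.concatenate((delta, -delta), axis=0)  # antithetic pairs
    grad_sum = 0.0
    for d in delta:
        grad_sum += f(np.clip(x + sigma * d, 0, 1)) * d
    return grad_sum / (n * sigma)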
Code Example #12
def main():

    #########  configs ###########
    best_metric = 0
    pretrain_vgg16_path = os.path.join(configs.py_dir,
                                       'model/vgg16_from_caffe.pth')

    ######  load datasets ########

    train_data = voc_dates.VOCDataset(configs.train_img_dir,
                                      configs.train_label_dir,
                                      configs.train_txt_dir,
                                      'train',
                                      transform=True)
    train_loader = Data.DataLoader(train_data,
                                   batch_size=configs.batch_size,
                                   shuffle=True,
                                   num_workers=4,
                                   pin_memory=True)

    val_data = voc_dates.VOCDataset(configs.val_img_dir,
                                    configs.val_label_dir,
                                    configs.val_txt_dir,
                                    'val',
                                    transform=True)
    val_loader = Data.DataLoader(val_data,
                                 batch_size=configs.batch_size,
                                 shuffle=False,
                                 num_workers=4,
                                 pin_memory=True)
    ######  build  models ########
    fcn32s = models.fcn32s()
    vgg_pretrain_model = utils.load_pretrain_model(pretrain_vgg16_path)
    fcn32s.init_parameters(vgg_pretrain_model)
    fcn32s = fcn32s.cuda()
    #########

    if resume:
        checkpoint = torch.load(configs.best_ckpt_dir)
        fcn32s.load_state_dict(checkpoint['state_dict'])
        print('resume success')

    ######### optimizer ##########
    ######## how to set a different learning rate for different layers #########
    optimizer = torch.optim.SGD([
        {
            'params': get_parameters(fcn32s, bias=False)
        },
        {
            'params': get_parameters(fcn32s, bias=True),
            'lr': configs.learning_rate * 2,
            'weight_decay': 0
        },
    ],
                                lr=configs.learning_rate,
                                momentum=configs.momentum,
                                weight_decay=configs.weight_decay)

    ######## iter img_label pairs ###########

    for epoch in range(20):

        utils.adjust_learning_rate(configs.learning_rate, optimizer, epoch)
        for batch_idx, (img_idx, label_idx) in enumerate(train_loader):

            img, label = Variable(img_idx.cuda()), Variable(label_idx.cuda())
            prediction = fcn32s(img)
            loss = utils.cross_entropy2d(prediction, label, size_average=False)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if (batch_idx) % 20 == 0:
                print("Epoch [%d/%d] Loss: %.4f" %
                      (epoch, batch_idx, loss.data[0]))

        current_metric = validate(fcn32s, val_loader, epoch)

        if current_metric > best_metric:

            torch.save({'state_dict': fcn32s.state_dict()},
                       os.path.join(configs.save_ckpt_dir,
                                    'fcn32s' + str(epoch) + '.pth'))

            shutil.copy(
                os.path.join(configs.save_ckpt_dir,
                             'fcn32s' + str(epoch) + '.pth'),
                os.path.join(configs.save_ckpt_dir, 'model_best.pth'))
            best_metric = current_metric

        if epoch % 5 == 0:
            torch.save({'state_dict': fcn32s.state_dict()},
                       os.path.join(configs.save_ckpt_dir,
                                    'fcn32s' + str(epoch) + '.pth'))
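
The two parameter groups passed to SGD rely on get_parameters(fcn32s, bias=...), which is not shown. In FCN training this helper conventionally separates biases (doubled learning rate, no weight decay) from weights; a sketch under that assumption:

# Sketch of the undefined get_parameters() helper: yield either the biases
# or the weights of the conv layers so the two optimizer groups can use
# different learning rates and weight decay.
def get_parameters(model, bias=False):
    for m in model.modules():
        if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
            yield m.bias if bias else m.weight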
Code Example #13
File: train_action40.py  Project: zbxzc35/pytorch_CAM
def main():

   ######### config  ###########

   best_metric = 0
   pretrain_vgg16_path = os.path.join(configs.py_dir,'model/vgg16_from_caffe.pth')

   ########  load training data ########
   ######### action 40  ############

   normalize = trans.Normalize(mean=[0.4001, 0.4401, 0.4687],std = [0.229, 0.224, 0.225])
                                    #std=[1, 1, 1])
   train_transform = trans.Compose([
       trans.RandomCrop(224,padding=4),
       trans.RandomHorizontalFlip(),
       trans.ToTensor(),
       normalize,
   ])

   val_transform = trans.Compose([
       trans.Scale((224,224)),
       trans.ToTensor(),
       normalize,
   ])

   train_data = imgfolder.ImageFolder(os.path.join(configs.data_dir,'img/train'),transform=train_transform)
   train_loader = torch.utils.data.DataLoader(
       train_data, batch_size=configs.batch_size, shuffle=True,
       num_workers=4, pin_memory=True)

   val_data = imgfolder.ImageFolder(os.path.join(configs.data_dir,'img/val'),transform=val_transform)
   val_loader = Data.DataLoader(val_data,batch_size=configs.batch_size,
                                shuffle= False, num_workers= 4, pin_memory= True)

   ######### build vgg model ##########

   vgg_cam = models.vgg_cam()
   vgg_pretrain_model = utils.load_pretrain_model(pretrain_vgg16_path)
   vgg_cam.copy_params_from_pretrain_vgg(vgg_pretrain_model,init_fc8=configs.init_random_fc8)
   vgg_cam = vgg_cam.cuda()

   ########  resume  ###########
   if resume:
        checkpoint = torch.load('/media/cheer/2T/train_pytorch/cam/ckpt/model_best.pth')
        vgg_cam.load_state_dict(checkpoint['state_dict'])
   ########## optim  ###########

   optimizer = torch.optim.SGD(vgg_cam.parameters(),lr=configs.learning_rate,momentum=configs.momentum,weight_decay=configs.weight_decay)
   #optimizer = torch.optim.Adam(vgg_cam.parameters(),lr=configs.learning_rate,weight_decay=configs.weight_decay)
   loss_fun = nn.CrossEntropyLoss()

   for epoch in range(200):

       adjust_learning_rate(optimizer, epoch)
       for step, (img_x,label_x) in enumerate(train_loader):

           img,label = Variable(img_x.cuda()), Variable(label_x.cuda())
           predict, _ = vgg_cam(img)
           loss = loss_fun(predict, label)
           optimizer.zero_grad()
           loss.backward()
           optimizer.step()
           if (step) % 20 == 0:
               print("Epoch [%d/%d] Loss: %.4f" % (epoch, step, loss.data[0]))

       current_metric = test(vgg_cam, val_loader, loss_fun)

       if current_metric > best_metric:

           torch.save({'state_dict': vgg_cam.state_dict()}, os.path.join(configs.save_ckpt_dir, 'cam' + str(epoch) + '.pth'))

           shutil.copy(os.path.join(configs.save_ckpt_dir, 'cam' + str(epoch) + '.pth'),
                       os.path.join(configs.save_ckpt_dir, 'model_best.pth'))
           best_metric = current_metric

       if epoch % 10 == 0:

           torch.save({'state_dict': vgg_cam.state_dict()}, os.path.join(configs.save_ckpt_dir, 'cam' + str(epoch) + '.pth'))
Code Example #14
def main(args):
    for arg in vars(args):
        print(arg, getattr(args, arg))

    model_name = args.model_name
    img_path = args.img_path
    img_label_path = 'imagenet.json'
    true_class = args.true_label
    adversarial_label = args.adv_label
    demo_epoch = args.epoch
    demo_eps = args.eps
    demo_lr = args.lr
    label_num = args.label_num
    lambda_up, lambda_down, lambda_label_loss = args.lambda_up, args.lambda_down, args.lambda_label_loss

    # load model
    sess, graph, img_size, images_v, logits = load_pretrain_model(model_name)
    probs = tf.nn.softmax(logits)
    print("sucessfully load model")

    if args.write_summary:
        unique_path_name = "up{}down{}ce{}epoch{}lr{}".format(
            args.lambda_up, args.lambda_down, args.lambda_label_loss,
            args.epoch, args.lr)
        final_summary_path = os.path.join(args.summary_path, unique_path_name)
        if not os.path.exists(final_summary_path):
            os.makedirs(final_summary_path)
        summary_writer = tf.summary.FileWriter(final_summary_path, graph)

    global_step = tf.Variable(0, name="global_step", trainable=False)
    step_init = tf.variables_initializer([global_step])

    y_hat = tf.placeholder(tf.int32, ())
    label_logits = tf.gather_nd(logits, [[0, y_hat]])

    img = PIL.Image.open(img_path)
    img = preprocess_img(img, img_size)
    batch_img = np.expand_dims(img, 0)
    imagenet_label = load_imagenet_label(img_label_path)

    # -------------------
    # Step 1: classify the image with original model
    p = sess.run(probs, feed_dict={images_v: batch_img})[0]
    predict_label = np.argmax(p)
    #classify(img, p, imagenet_label, correct_class=true_class, is_cluster=True)

    # -------------------
    # Step 2: Construct adversarial examples
    image_pl = tf.placeholder(tf.float32, (1, img_size, img_size, 3))
    assign_op = tf.assign(images_v, image_pl)
    learning_rate = tf.placeholder(tf.float32, ())
    var_eps = tf.placeholder(tf.float32, ())
    labels = tf.one_hot(y_hat, label_num)
    loss = tf.nn.softmax_cross_entropy_with_logits(logits=logits,
                                                   labels=labels)[0]

    projected = tf.clip_by_value(
        (tf.clip_by_value(images_v, image_pl - var_eps, image_pl + var_eps)),
        0, 1)
    with tf.control_dependencies([projected]):
        project_step = tf.assign(images_v, projected)

    # initialization step
    _ = sess.run([assign_op, step_init], feed_dict={image_pl: batch_img})

    # construct targeted attack
    # feed_dict_optim = {image_pl:batch_img,
    #                    y_hat:adversarial_label,
    #                    learning_rate:demo_lr}
    #
    # feed_dict_proj = {image_pl:batch_img,
    #                   var_eps:demo_eps}
    # optim_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, var_list=[images_v])
    # model_train(sess=sess,
    #             optim_step=optim_step,
    #             project_step=project_step,
    #             loss=loss,
    #             feed_dict_optim=feed_dict_optim,
    #             feed_dict_project=feed_dict_proj,
    #             epoch=10)
    #
    # adv_img = np.squeeze(images_v.eval(),0)
    # adv_prob = sess.run(probs,feed_dict={images_v:np.expand_dims(adv_img,0)})
    # classify(adv_img, adv_prob[0],imagenet_label,correct_class=281,target_class=adversarial_label)
    #
    # # show the saliency map
    # feed_dict_gradient = {y_hat:true_class}
    # _ = show_gradient_map(graph=graph,
    #                   sess=sess,
    #                   y=label_logits,
    #                   x=images_v,
    #                   img=img,
    #                   is_integrated=False,
    #                   is_smooth=False,
    #                   feed_dict=feed_dict_gradient)
    #---------------
    # use gradient descent to control the saliency map

    # original gradient intensity
    map3D, map_grey = show_gradient_map(graph=graph,
                                        sess=sess,
                                        y=label_logits,
                                        x=images_v,
                                        img=img,
                                        is_integrated=False,
                                        is_smooth=True,
                                        feed_dict={y_hat: true_class},
                                        is_cluster=args.is_cluster)

    center_more, radius_more = (100, 110), 10
    center_less, radius_less = (100, 70), 10
    gradient_more = calculate_region_importance(map_grey, center_more,
                                                radius_more)
    gradient_less = calculate_region_importance(map_grey, center_less,
                                                radius_less)
    print(
        "region 1 gradient intensity %.3f, region 2 gradient intensity %.3f" %
        (gradient_more, gradient_less))

    # construct new loss function
    grad_map = tf.gradients(label_logits, images_v)[0]
    to_down_gradient = calculate_img_region_importance(grad_map, center_more,
                                                       radius_more)
    to_up_gradient = calculate_img_region_importance(grad_map, center_less,
                                                     radius_less)
    grad_loss = -lambda_up * to_up_gradient + lambda_down * to_down_gradient
    final_loss = grad_loss + lambda_label_loss * loss
    if args.write_summary:
        up_gradient_summary = tf.summary.scalar("up_gradient", to_up_gradient)
        down_gradient_summary = tf.summary.scalar("down_gradient",
                                                  to_down_gradient)
        loss_summary = tf.summary.scalar("loss", loss)
        train_summary_op = tf.summary.merge_all()
    change_grad_optim_step = tf.train.GradientDescentOptimizer(
        learning_rate=demo_lr).minimize(final_loss,
                                        var_list=[images_v],
                                        global_step=global_step)
    for i in range(demo_epoch):
        if args.write_summary:
            _, _loss, step, summary_str = sess.run([
                change_grad_optim_step, final_loss, global_step,
                train_summary_op
            ],
                                                   feed_dict={
                                                       image_pl: batch_img,
                                                       y_hat: true_class,
                                                       learning_rate: demo_lr
                                                   })
            summary_writer.add_summary(summary_str, global_step=step)
        else:
            _, _loss, step = sess.run(
                [change_grad_optim_step, final_loss, global_step],
                feed_dict={
                    image_pl: batch_img,
                    y_hat: true_class,
                    learning_rate: demo_lr
                })

        sess.run([project_step],
                 feed_dict={
                     image_pl: batch_img,
                     var_eps: demo_eps
                 })
        print("%d loss = %g" % (i, _loss))
        if i % args.image_interval == 0:
            adv_img = np.squeeze(images_v.eval(), 0)
            # check the prediction result
            p_adv = sess.run(probs, feed_dict={images_v: batch_img})[0]
            predict_label_adv = np.argmax(p_adv)
            #classify(adv_img, p_adv, imagenet_label, correct_class=true_class,is_cluster=args.is_cluster)

            # check the gradient map
            map3D_adv, map_grey_adv = show_gradient_map(
                graph=graph,
                sess=sess,
                y=label_logits,
                x=images_v,
                img=adv_img,
                is_integrated=False,
                is_smooth=False,
                feed_dict={y_hat: true_class},
                is_cluster=args.is_cluster)

            adv_gradient_more = calculate_region_importance(
                map_grey_adv, center_more, radius_more)
            adv_gradient_less = calculate_region_importance(
                map_grey_adv, center_less, radius_less)

            if args.write_summary:
                map_grey_adv = tf.expand_dims(tf.expand_dims(map_grey_adv, 0),
                                              3)
                adv_map_sum = tf.summary.image(
                    'adv_map' + str(i), tf.convert_to_tensor(map_grey_adv))
                adv_str = sess.run(adv_map_sum)
                summary_writer.add_summary(adv_str)
            print(
                "Adversarial Case: predict label: %d, big region  gradient intensity: %.3f, small region gradient intensity: %.3f"
                % (predict_label_adv, adv_gradient_more, adv_gradient_less))
            print(
                "Normal Case: predict label: %d, big region gradient intensity: %.3f, small region gradient intensity: %.3f"
                % (predict_label, gradient_more, gradient_less))

    # write original map (only when a summary writer was created above)
    if args.write_summary:
        map_grey = tf.expand_dims(tf.expand_dims(map_grey, 0), 3)
        orig_map_sum = tf.summary.image('orig_map',
                                        tf.convert_to_tensor(map_grey))
        orig_str = sess.run(orig_map_sum)
        summary_writer.add_summary(orig_str)
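
calculate_region_importance is used by examples #11 and #14 but never defined: it reduces a grayscale saliency map over a window around center to a scalar. A plausible sketch that accepts both the scalar radius of example #14 and the (ry, rx) tuple of example #11:

# Plausible sketch of the undefined calculate_region_importance(): total
# saliency mass inside a rectangular window around (row, col) = center.
import numpy as np

def calculate_region_importance(saliency_map, center, radius):
    ry, rx = radius if isinstance(radius, tuple) else (radius, radius)
    y0, x0 = max(center[0] - ry, 0), max(center[1] - rx, 0)
    region = saliency_map[y0:center[0] + ry, x0:center[1] + rx]
    return np.abs(region).sum()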