Example #1
def singel_strength():
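    # Adversarial fine-tuning at a single fixed strength (eps = 0.9): in each of nine
    # rounds the pre-trained GTSRB sub-model is reloaded, trained for one pass with a
    # clean update followed by an FGSM-adversarial update per batch, and saved.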
    for i in range(9):
        print('*' * 45)
        print('Training {}_th model'.format(i + 1))
        #print (i)

        if i < 9:  # always true for i in range(9)
            eps = [0.9]  # single fixed perturbation strength
            print('eps is {}'.format(eps))
            print('*' * 45)
            print('\n' * 2)
            model = torch.load(
                '/home/hankeji/Desktop/jsai/models/GTSRB_submodels_0.pkl'
            ).cpu()

            print(id(model))
            print(id(train_model))  # train_model is assumed to be a module-level global
            for batch_idx, (data, target) in enumerate(train_loader):
                data, target = Variable(data,
                                        requires_grad=True), Variable(target)
                optimizer0 = optim.Adam(model.parameters(),
                                        lr=0.01,
                                        weight_decay=1e-4)
                output = model(data)
                loss = cit(output, target)
                optimizer0.zero_grad()
                loss.backward()
                optimizer0.step()

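                # Adversarial step: craft FGSM examples with a randomly chosen
                # sub-model and take a second update on them; note that optimizer1
                # below is created but never stepped.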
                attack_model_index = random.sample([0, 1, 2, 4], 1)[0]
                attack_model = model_list[attack_model_index].cpu()
                optimizer1 = optim.Adam(attack_model.parameters(),
                                        lr=0.01,
                                        weight_decay=1e-4)
                adv = symbolic_fgs(attack_model, cit, data, target, eps=eps)
                data = Variable(adv.data, requires_grad=True)
                output1 = model(data)
                optimizer0.zero_grad()
                loss = cit(output1, target)
                loss.backward()
                optimizer0.step()
            torch.save(
                model,
                '/home/hankeji/Desktop/model_GTSRB/GTSRB_multi_model_du_test.pkl'
            )
            del model
            gc.collect()
Example #2
def mix_strength():
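    # Adversarial training with mixed perturbation strengths: three eps values are
    # sampled from [0.1, ..., 0.9] and cycled over ten passes through the training
    # set, alternating a clean update and an adversarial update on each batch.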
    #model=model_list[0].cpu()
    #a=b1(1, train_model)
    #print (a)
    eps0 = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
    #eps0=[0.2, 0.6, 0.9]
    #eps0=search_strength()
    #tmp_n = np.random.randint(1, 9, 1)[0]
    eps0 = random.sample(eps0, 3)
    tmp = len(eps0)
    print('\n' * 2)
    print(tmp)
    train_model = torch.load(
        '/home/hankeji/Desktop/jsai/models/GTSRB_submodels_0.pkl').cpu()
    for i in range(10):
        eps = [eps0[i % 3]]

        for batch_idx, (data, target) in enumerate(train_loader):

            print(batch_idx)
            data, target = Variable(data, requires_grad=True), Variable(target)

            optimizer0 = optim.Adam(train_model.parameters(),
                                    lr=0.01,
                                    weight_decay=1e-4)
            output = train_model(data)
            loss = cit(output, target)
            optimizer0.zero_grad()
            loss.backward()
            optimizer0.step()

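            # Adversarial step: perturb the batch with a randomly chosen sub-model
            # and update train_model on the perturbed inputs.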
            attack_model_index = random.sample([0, 1, 2, 4], 1)[0]
            attack_model = model_list[attack_model_index].cpu()
            #optimizer1 = optim.Adam(attack_model.parameters(), lr=0.01, weight_decay=1e-4)
            adv = symbolic_fgs(attack_model, cit, data, target, eps=eps)
            data = Variable(adv.data, requires_grad=True)
            output1 = train_model(data)
            optimizer0.zero_grad()
            loss = cit(output1, target)
            loss.backward()
            optimizer0.step()

    torch.save(
        train_model,
        '/home/hankeji/Desktop/model_GTSRB/GTSRB_multi_model_random_strength.pkl'
    )
Example #3
def search_strength():
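    # For each attack strength 0.1-0.9, evaluates the nine single-strength defended
    # models on one adversarial test batch (the attack model is fixed to
    # model_list[3]), records the training strength of the best-performing model,
    # and returns the de-duplicated list of those strengths.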
    attack_index = random.sample([3], 1)[0]
    attack_model = model_list[attack_index].cpu()
    strength_list = []
    strength_list = np.asarray(strength_list, np.float64)
    for i in range(9):
        print('Searching scale of {}!'.format((i + 1) * 0.1))
        eps = [(i + 1) * 0.1]
        tmp_acc = []
        tmp_acc = np.asarray(tmp_acc, np.float64)
        for j in range(9):
            corr = 0  # reset the correct-prediction count for each defended model
            tmp_model = torch.load(
                '/home/hankeji/Desktop/model_GTSRB/GTSRB_multi_model_' +
                str((j + 1) * 0.1) + '.pkl').cpu()
            for batch_idx, (data, target) in enumerate(test_loader):
                data, target = Variable(data,
                                        requires_grad=True), Variable(target)
                if batch_idx > 0:
                    break
                adv = symbolic_fgs(attack_model, cit, data, target, eps=eps)
                data = Variable(adv.data, requires_grad=True)

                output = tmp_model(data)
                pred = output.data.max(1)[1]

                corr += pred.eq(target.data).sum()
            corr = corr / batch_size
            tmp_acc = np.append(tmp_acc, corr)
            tmp_acc = np.reshape(tmp_acc, (-1, 1))
        tmp_acc = torch.from_numpy(tmp_acc)
        tmp_acc_idx = tmp_acc.max(0)[1]
        aa = float(tmp_acc_idx.numpy() + 1) * 0.1
        print(aa)
        strength_list = np.append(strength_list, aa)
    final_strength_list = []
    final_strength_list = np.asarray(final_strength_list, np.float64)
    for i in strength_list:
        if i not in final_strength_list:
            final_strength_list = np.append(final_strength_list, i)
    print(final_strength_list)
    return final_strength_list
Example #4
def defense_eval():
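    # Evaluates each of the nine single-strength defended models against FGSM
    # perturbations of increasing strength (0 to 0.9 in steps of 0.03) on a single
    # test batch, and saves the resulting (eps, accuracy) pairs.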
    acc = []
    acc = np.asarray(acc, np.float64)
    a = 31
    attack_model_index = random.sample([3], 1)[0]
    attack_model = model_list[attack_model_index].cpu()
    for i in range(9):
        tmp_model = torch.load(
            '/home/hankeji/Desktop/model_GTSRB/GTSRB_multi_model_' +
            str((i + 1) * 0.1) + '.pkl').cpu()
        #acc=np.append(acc, i)
        for j in range(a):  # separate loop variable to avoid shadowing the outer i
            if j == 0:
                eps = [0]
            else:
                eps = [j * 0.03]
            acc = np.append(acc, eps)

            corr = 0
            for batch_idx, (data, target) in enumerate(test_loader):
                if batch_idx > 0:
                    break
                print(batch_idx)
                data, target = Variable(data,
                                        requires_grad=True), Variable(target)
                data = symbolic_fgs(attack_model, cit, data, target, eps=eps)
                data = Variable(data.data, requires_grad=True)
                output = tmp_model(data)
                pred = output.data.max(1)[1]
                corr += pred.eq(target.data).cpu().sum()
            #print (batch_idx+1)
            corr = corr / ((batch_idx) * batch_size)
            acc = np.append(acc, corr)
    acc = np.reshape(acc, (9, a, 2))
    #print acc
    np.save(
        '/home/hankeji/Desktop/jsai/data/GTSRB_model_acc_single_modle_clean.npy',
        acc)  #_single model _mix_strength [0.1,0.6,0.8]
    return acc
Example #5
def ori_attack():
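    # Baseline: evaluates the original pre-trained sub-model against FGSM
    # perturbations of increasing strength on a single test batch and saves the
    # (eps, accuracy) curve.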
    model = torch.load(
        '/home/hankeji/Desktop/jsai/models/GTSRB_submodels_0.pkl').cpu()
    corr = 0
    #corr_du=0
    #corr_ori=0
    acc = []
    acc = np.asarray(acc, np.float64)
    attack_index = random.sample([3], 1)[0]
    attack_model = model_list[attack_index].cpu()
    a = 31
    for i in range(a):
        corr = 0  # reset the correct-prediction count for each perturbation strength
        if i == 0:
            eps = [0]
        else:
            eps = [i * 0.03]
        print('*' * 45)
        print('eps is {}'.format(eps[0]))
        print('*' * 45)
        print('\n' * 2)
        for batch_idx, (data, target) in enumerate(test_loader):
            if batch_idx > 0:
                break
            data, target = Variable(data, requires_grad=True), Variable(target)
            adv = symbolic_fgs(attack_model, cit, data, target, eps=eps)
            data = Variable(adv.data, requires_grad=True)

            output = model(data)
            pred = output.data.max(1)[1]
            corr += pred.eq(target.data).sum()

        corr = corr / (batch_idx * batch_size)
        acc = np.append(acc, eps)
        acc = np.append(acc, corr)

    acc = np.reshape(acc, (-1, 2))
    np.save('/home/hankeji/Desktop/jsai/data/GTSRB_ori.npy', acc)
    return acc
Example #6
def vs():
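    # Compares six saved defense variants under the same FGSM attack on a single
    # test batch over a range of perturbation strengths and saves the per-strength
    # accuracies; note that mm and m_all load the same checkpoint.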
    attack_index = random.sample([3], 1)[0]
    attack_model = model_list[attack_index].cpu()
    mm = torch.load(
        '/home/hankeji/Desktop/model_GTSRB/GTSRB_multi_model_all_strength.pkl')
    ms = torch.load(
        '/home/hankeji/Desktop/model_GTSRB/GTSRB_multi_model_0.3.pkl')
    m_all = torch.load(
        '/home/hankeji/Desktop/model_GTSRB/GTSRB_multi_model_all_strength.pkl')
    m_random = torch.load(
        '/home/hankeji/Desktop/model_GTSRB/GTSRB_multi_model_random_strength.pkl'
    )
    m_sm = torch.load(
        '/home/hankeji/Desktop/model_GTSRB/GTSRB_single_model_multi_strength.pkl'
    )
    m_ss = torch.load(
        '/home/hankeji/Desktop/model_GTSRB/GTSRB_single_model_0.3.pkl')
    acc = []
    acc = np.asarray(acc, np.float64)
    a = 31
    for i in range(a):
        acc_mm = 0
        acc_ms = 0
        acc_m_all = 0
        acc_m_random = 0
        acc_sm = 0
        acc_ss = 0
        if i == 0:
            eps = [0]
        else:
            eps = [(i + 1) * 0.03]

        for batch_idx, (data, target) in enumerate(test_loader):
            #print (data[0])
            if batch_idx > 0:
                break
            print('Here coming {}_th batch'.format(batch_idx + 1))
            data, target = Variable(data, requires_grad=True), Variable(target)
            adv = symbolic_fgs(attack_model, cit, data, target, eps)
            data = Variable(adv.data, requires_grad=True)

            output1 = mm(data)
            pred1 = output1.data.max(1)[1]
            acc_mm += pred1.eq(target.data).sum()

            output2 = ms(data)
            pred2 = output2.data.max(1)[1]
            acc_ms += pred2.eq(target.data).sum()

            output3 = m_all(data)
            pred3 = output3.data.max(1)[1]
            acc_m_all += pred3.eq(target.data).sum()

            output4 = m_random(data)
            pred4 = output4.data.max(1)[1]
            acc_m_random += pred4.eq(target.data).sum()

            output5 = m_sm(data)
            pred5 = output5.data.max(1)[1]
            acc_sm += pred5.eq(target.data).sum()

            output6 = m_ss(data)
            pred6 = output6.data.max(1)[1]
            acc_ss += pred6.eq(target.data).sum()

        acc_mm = acc_mm / batch_size  #len(test_loader.dataset)
        acc_ms = acc_ms / batch_size  # len(test_loader.dataset)
        acc_m_all = acc_m_all / batch_size  #len(test_loader.dataset)
        acc_m_random = acc_m_random / batch_size  #len(test_loader.dataset)
        acc_sm = acc_sm / batch_size  #len(test_loader.dataset)
        acc_ss = acc_ss / batch_size  #len(test_loader.dataset)

        acc = np.append(acc, eps)
        acc = np.append(acc, acc_mm)
        acc = np.append(acc, acc_ms)
        acc = np.append(acc, acc_m_all)
        acc = np.append(acc, acc_m_random)
        acc = np.append(acc, acc_sm)
        acc = np.append(acc, acc_ss)

    acc = np.reshape(acc, (a, -1))
    np.save('/home/hankeji/Desktop/jsai/data/GTSRB_mmVSms_7.npy', acc)
    return acc