def retrain(x_target, y_test, origin_acc, model, args, layer_names,
            selectsize=100, attack='fgsm', measure='lsa', datatype='mnist'):
    """Pick `selectsize` inputs from `x_target` with the chosen baseline
    `measure`, fine-tune `model` on them, and return the accuracy on the
    full target pool after retraining.

    NOTE(review): reads a module-level `x_train` for LSA/DSA; `origin_acc`
    and `datatype` are accepted but unused — kept for caller compatibility.
    """
    if measure == 'SRS':
        # Simple random sampling.
        x_select, y_select = select_rondom(selectsize, x_target, x_target, y_test)
    elif measure == 'MCP':
        x_select, y_select = select_my_optimize(model, selectsize, x_target, y_test)
    elif measure == 'CES':
        # Conditional sampling, cached on disk per attack/budget.
        cache_path = "./conditional/" + attack + "_cifar_" + str(selectsize) + ".npy"
        if os.path.exists(cache_path):
            sample_idx = list(np.load(cache_path))
        else:
            sample_idx = condition.conditional_sample(model, x_target, selectsize)
            np.save(cache_path, np.array(sample_idx))
        x_select, y_select = select_from_index(selectsize, x_target, sample_idx, y_test)
    else:
        # Score-ranked selection: compute a per-input score, then keep the
        # largest ones.  Unknown measures fall through with an empty score list.
        score_lst = []
        if measure == 'LSA':
            score_lst = fetch_lsa(model, x_train, x_target, attack, layer_names, args)
        elif measure == 'DSA':
            score_lst = fetch_dsa(model, x_train, x_target, attack, layer_names, args)
        elif measure == 'AAL':
            # Scores precomputed offline, one column per input.
            csv_path = "./cifar_finalResults/cifar_" + attack + "_compound8_result.csv"
            frame = pd.read_csv(csv_path, header=None)
            score_lst = [column for column in frame.values.T]
        x_select, y_select = select_from_large(selectsize, x_target, score_lst, y_test)

    y_select = np_utils.to_categorical(y_select, 10)
    y_test = np_utils.to_categorical(y_test, 10)

    model.compile(loss='categorical_crossentropy', optimizer="adadelta",
                  metrics=['accuracy'])
    model.fit(x_select, y_select, batch_size=100, epochs=5, shuffle=True,
              verbose=1, validation_data=(x_target, y_test))
    retrain_acc = model.evaluate(x_target, y_test, verbose=0)[1]
    return retrain_acc
def test(args, ntime, data):
    """Score the test split of one randomly-sampled model run.

    Depending on the flags in `args`, the score is the true labels,
    predicted confidences, LSA, DSA, or predicted labels.

    NOTE(review): if `args.d` is neither 'mnist' nor 'cifar', or none of
    the metric flags is set, this raises NameError (preserved behavior).
    """
    (x_train, y_train), (x_test, y_test) = data

    if args.true_label:
        return np.argmax(y_test, axis=1)

    # Load the checkpoint for run `ntime` and derive the activation layer
    # index from the model name suffix.
    if args.d == 'mnist':
        model = load_model('./random_sample_model/%s/%i/model_-75-.h5' % (args.d, ntime))
        model.summary()
        layer_num = 3 + 4 * (int(model.name.split('_')[-1]) - 1)
    if args.d == 'cifar':
        model = load_model('./random_sample_model/%s/%i/model_-325-.h5' % (args.d, ntime))
        model.summary()
        layer_num = 11 + 12 * (int(model.name.split('_')[-1]) - 1)

    args.layer = 'activation_' + str(layer_num)
    print(args.layer)

    if args.conf:
        score = list(np.amax(model.predict(x_test), axis=1))
    if args.lsa:
        score = fetch_lsa(model, x_train, x_test, "test", [args.layer], args)
    if args.dsa:
        score = fetch_dsa(model, x_train, x_test, "test", [args.layer], args)
    if args.pred_label:
        score = np.argmax(model.predict(x_test), axis=1)
    return score
# Evaluate surprise adequacy of the adversarial CIFAR target set against the
# clean test set.
# NOTE(review): relies on module-level `model`, `x_train`, `x_test`,
# `layer_names`, `CLIP_MAX` and helpers (fetch_lsa/fetch_dsa, get_sc,
# compute_roc_auc, infog) defined elsewhere in the file.
x_target = np.load("./adv/adv_cifar_{}.npy".format(args.target))

# Scale pixels into the clipped range the model was trained with.
x_train = x_train.astype("float32")
x_train = (x_train / 255.0) - (1.0 - CLIP_MAX)
x_test = x_test.astype("float32")
x_test = (x_test / 255.0) - (1.0 - CLIP_MAX)

if args.lsa:
    test_lsa = fetch_lsa(model, x_train, x_test, "test", layer_names, args)
    target_lsa = fetch_lsa(model, x_train, x_target, args.target, layer_names, args)
    target_cov = get_sc(np.amin(target_lsa), args.upper_bound, args.n_bucket,
                        target_lsa)
    auc = compute_roc_auc(test_lsa, target_lsa)
    print(infog("ROC-AUC: " + str(auc * 100)))

if args.dsa:
    test_dsa = fetch_dsa(model, x_train, x_test, "test", layer_names, args)
    target_dsa = fetch_dsa(model, x_train, x_target, args.target, layer_names, args)
    target_cov = get_sc(np.amin(target_dsa), args.upper_bound, args.n_bucket,
                        target_dsa)
    auc = compute_roc_auc(test_dsa, target_dsa)
    print(infog("ROC-AUC: " + str(auc * 100)))

# Fix: this print previously ran unconditionally, raising NameError on
# `target_cov` when neither --lsa nor --dsa was requested.
if args.lsa or args.dsa:
    print(infog("{} coverage: ".format(args.target) + str(target_cov)))
def retrain(x_target, y_test, origin_acc, model, args, layer_names,
            selectsize=100, attack='fgsm', measure='lsa', datatype='mnist'):
    """Select a retraining subset from `x_target` with `measure`, fine-tune
    `model`, and return accuracy on the full target pool.

    For 'my' / 'my_optimize' the best of five single-epoch fits is kept;
    every other measure trains once for five epochs.

    NOTE(review): reads a module-level `x_train` for lsa/dsa; `origin_acc`
    and `datatype` are unused — kept for caller compatibility.
    """
    if measure == 'random':
        x_select, y_select = select_rondom(selectsize, x_target, x_target, y_test)
    elif measure == 'my_optimize':
        x_select, y_select = select_my_optimize(model, selectsize, x_target, y_test)
    elif measure == 'conditional':
        # Conditional sampling, cached on disk per attack/budget.
        cache_path = "./conditional/" + attack + "_cifar_" + str(selectsize) + ".npy"
        if os.path.exists(cache_path):
            sample_idx = list(np.load(cache_path))
        else:
            sample_idx = condition.conditional_sample(model, x_target, selectsize)
            np.save(cache_path, np.array(sample_idx))
        x_select, y_select = select_from_index(selectsize, x_target, sample_idx, y_test)
    else:
        # Score-ranked selection paths.
        score_lst = []
        if measure == 'my_after_random':
            # Score only a random 2x-budget subset.
            # NOTE(review): the ranking over the subset is then applied to the
            # full x_target by select_from_large — verify this is intended.
            picked = list(np.random.choice(range(len(x_target)),
                                           replace=False, size=selectsize * 2))
            score_lst = fetch_our_measure(model, x_target[picked])
        elif measure == 'my_entropy':
            score_lst = fetch_our_entropy(model, x_target)
        elif measure == 'lsa':
            score_lst = fetch_lsa(model, x_train, x_target, attack, layer_names, args)
        elif measure == 'dsa':
            score_lst = fetch_dsa(model, x_train, x_target, attack, layer_names, args)
        elif measure == 'my':
            score_lst = fetch_our_measure(model, x_target)
        elif measure == 'adaptive':
            # Scores precomputed offline, one column per input.
            csv_path = "./cifar_finalResults/cifar_" + attack + "_compound8_result.csv"
            frame = pd.read_csv(csv_path, header=None)
            score_lst = [column for column in frame.values.T]
        x_select, y_select = select_from_large(selectsize, x_target, score_lst, y_test)

    y_select = np_utils.to_categorical(y_select, 10)
    y_test = np_utils.to_categorical(y_test, 10)

    model.compile(loss='categorical_crossentropy', optimizer="adadelta",
                  metrics=['accuracy'])
    retrain_acc = 0
    if measure in ['my', 'my_optimize']:
        # Keep the best accuracy seen over five single-epoch fits.
        for _ in range(5):
            model.fit(x_select, y_select, batch_size=100, epochs=1,
                      shuffle=True, verbose=1, validation_data=(x_target, y_test))
            score = model.evaluate(x_target, y_test, verbose=0)
            if score[1] > retrain_acc:
                retrain_acc = score[1]
            print('After retrain, Test accuracy: %.4f' % score[1])
    else:
        model.fit(x_select, y_select, batch_size=100, epochs=5,
                  shuffle=True, verbose=1, validation_data=(x_target, y_test))
        retrain_acc = model.evaluate(x_target, y_test, verbose=0)[1]
    return retrain_acc
# Evaluate surprise adequacy of the adversarial CIFAR target set against the
# clean test set.
# NOTE(review): relies on module-level `model`, `x_train`, `x_test`,
# `layer_names`, `CLIP_MAX` and helpers (fetch_lsa/fetch_dsa, get_sc,
# compute_roc_auc, infog) defined elsewhere in the file.
x_target = np.load("./adv/adv_cifar_{}.npy".format(args.target))

# Scale pixels into the clipped range the model was trained with.
x_train = x_train.astype("float32")
x_train = (x_train / 255.0) - (1.0 - CLIP_MAX)
x_test = x_test.astype("float32")
x_test = (x_test / 255.0) - (1.0 - CLIP_MAX)

if args.lsa:
    test_lsa = fetch_lsa(model, x_train, x_test, "test", layer_names, args)
    target_lsa = fetch_lsa(model, x_train, x_target, args.target, layer_names, args)
    target_cov = get_sc(
        np.amin(target_lsa), args.upper_bound, args.n_bucket, target_lsa
    )
    auc = compute_roc_auc(test_lsa, target_lsa)
    print(infog("ROC-AUC: " + str(auc * 100)))

if args.dsa:
    test_dsa = fetch_dsa(model, x_train, x_test, "test", layer_names, args)
    target_dsa = fetch_dsa(model, x_train, x_target, args.target, layer_names, args)
    target_cov = get_sc(
        np.amin(target_dsa), args.upper_bound, args.n_bucket, target_dsa
    )
    auc = compute_roc_auc(test_dsa, target_dsa)
    print(infog("ROC-AUC: " + str(auc * 100)))

# Fix: this print previously ran unconditionally, raising NameError on
# `target_cov` when neither --lsa nor --dsa was requested.
if args.lsa or args.dsa:
    print(infog("{} coverage: ".format(args.target) + str(target_cov)))
# NOTE(review): fragment — the chunk below is cut off mid-call
# (`temp_model = Model(inputs=model.input,` has no closing paren in view),
# so it is left byte-identical rather than reconstructed.  It scores the
# TRAINING set (LSA/DSA/confidence) and writes each metric to
# ./results_training_data/.  Presumably `mnist_get_correct_and_incorrect_test_images`
# splits images by prediction correctness — TODO confirm; `correct`/`incorrect`
# are unused in the visible span.
y_pred_train = model.predict(x_train) correct, incorrect = mnist_get_correct_and_incorrect_test_images( y_pred=y_pred_train, y_true=y_train) if args.lsa == True: layer_names = ["activation_11"] lsa = fetch_lsa(model, x_train, x_train, "train", layer_names, args) write_file(path_file='./results_training_data/lsa_{}.txt'.format( args.d), data=lsa) if args.dsa == True: layer_names = ["activation_11"] dsa = fetch_dsa(model, x_train, x_train, "train", layer_names, args) write_file(path_file='./results_training_data/dsa_{}.txt'.format( args.d), data=dsa) if args.conf == True: conf_score = list(np.amax(y_pred_train, axis=1)) write_file(path_file='./results_training_data/conf_{}.txt'.format( args.d), data=conf_score) if args.conf_no_norm == True: layer_names = ['dense_2'] temp_model = Model( inputs=model.input,
# NOTE(review): fragment — starts mid-`if` (the `elif args.d == 'imagenet'`
# belongs to a condition outside this view) and is truncated at the end, so
# it is left byte-identical.  It computes test-set LSA/DSA/confidence and
# writes them under ./metrics/.
# BUG(review): the stray `exit()` right after the first fetch_lsa call makes
# the following write_file unreachable — looks like leftover debugging;
# remove it once the full context is available.
test_lsa = fetch_lsa(model, x_train, x_test, "test", [args.layer], args) exit() write_file(path_file='./metrics/{}_lsa_{}.txt'.format( args.d, args.layer), data=test_lsa) elif args.d == 'imagenet': test_lsa = fetch_lsa(model, x_train, x_test, "test", [args.layer], args) write_file(path_file='./metrics/{}_lsa_{}_{}.txt'.format( args.d, args.model, args.layer), data=test_lsa) if args.dsa: if args.d == 'mnist' or args.d == 'cifar': test_dsa = fetch_dsa(model, x_train, x_test, "test", [args.layer], args) write_file(path_file='./metrics/{}_dsa_{}.txt'.format( args.d, args.layer), data=test_dsa) elif args.d == 'imagenet': test_dsa = fetch_dsa(model, x_train, x_test, "test", [args.layer], args) write_file(path_file='./metrics/{}_dsa_{}_{}.txt'.format( args.d, args.model, args.layer), data=test_dsa) if args.conf: if args.d == 'mnist' or args.d == 'cifar': y_pred = model.predict(x_test) test_conf = list(np.amax(y_pred, axis=1)) write_file(path_file='./metrics/{}_conf.txt'.format(args.d),
# Restore the tracked CIFAR checkpoint and dump surprise adequacy of the
# adversarial target set to ./sa/.
# NOTE(review): relies on module-level `x_train`, `x_test`, `CLIP_MAX`,
# `args`, and helpers (fetch_lsa/fetch_dsa, write_file) defined elsewhere.
model = load_model('./model_tracking/cifar_model_improvement-496-0.87.h5')
model.summary()
layer_names = ["activation_11"]

# Load target set.
x_target = np.load("./adv/adv_cifar.npy")

# Scale pixels into the clipped range the model was trained with.
x_train = x_train.astype("float32")
x_train = (x_train / 255.0) - (1.0 - CLIP_MAX)
x_test = x_test.astype("float32")
x_test = (x_test / 255.0) - (1.0 - CLIP_MAX)

if args.lsa:
    test_lsa = fetch_lsa(model, x_train, x_target, "adversarial", layer_names, args)
    write_file(path_file='./sa/lsa_adversarial_{}.txt'.format(args.d), data=test_lsa)

if args.dsa:
    test_dsa = fetch_dsa(model, x_train, x_target, "adversarial", layer_names, args)
    write_file(path_file='./sa/dsa_adversarial_{}.txt'.format(args.d), data=test_dsa)
def retrain(model, args, layer_names, selectsize=100, attack='fgsm',
            measure='lsa', datatype='mnist'):
    """Select `selectsize` inputs from the adversarial MNIST pool with the
    chosen baseline `measure`, fine-tune `model` for five epochs on them,
    and return (retrain accuracy, original accuracy) on the full pool.

    NOTE(review): `datatype` is accepted but unused — kept for caller
    compatibility.
    """
    # Clean training data, normalised the same way the model was trained.
    (x_train, __), (__, __) = mnist.load_data()
    x_train = x_train.astype("float32").reshape(-1, 28, 28, 1)
    x_train = (x_train / 255.0) - (1.0 - CLIP_MAX)

    # Adversarial pool for the requested attack.
    npzfile = np.load('./adv/data/mnist/mnist_' + attack + '_compound8.npz')
    y_test = npzfile['y_test']
    x_target = npzfile['x_test']

    # Baselines: LSA / DSA / CES / MCP / SRS / AAL.
    if measure == 'SRS':
        x_select, y_select = select_rondom(selectsize, x_target, x_target, y_test)
    elif measure == 'MCP':
        x_select, y_select = select_my_optimize(model, selectsize, x_target, y_test)
    elif measure == 'CES':
        sample_idx = condition.conditional_sample(model, x_target, selectsize)
        x_select, y_select = select_from_index(selectsize, x_target, sample_idx, y_test)
    else:
        # Score-ranked selection; unknown measures fall through with an
        # empty score list.
        score_lst = []
        if measure == 'LSA':
            score_lst = fetch_lsa(model, x_train, x_target, attack, layer_names, args)
        elif measure == 'DSA':
            score_lst = fetch_dsa(model, x_train, x_target, attack, layer_names, args)
        elif measure == 'AAL':
            # Scores precomputed offline, one column per input.
            csv_path = "./mnist_finalResults/mnist_" + attack + "_compound8_result.csv"
            frame = pd.read_csv(csv_path, header=None)
            score_lst = [column for column in frame.values.T]
        x_select, y_select = select_from_large(selectsize, x_target, score_lst, y_test)

    y_select = np_utils.to_categorical(y_select, 10)
    y_test = np_utils.to_categorical(y_test, 10)

    # Baseline accuracy before fine-tuning.
    score = model.evaluate(x_target, y_test, verbose=0)
    print('Before retrain, Test accuracy: %.4f' % score[1])
    origin_acc = score[1]

    model.compile(loss='categorical_crossentropy', optimizer="adadelta",
                  metrics=['accuracy'])
    model.fit(x_select, y_select, batch_size=100, epochs=5, shuffle=True,
              verbose=1, validation_data=(x_target, y_test))
    retrain_acc = model.evaluate(x_target, y_test, verbose=0)[1]
    return retrain_acc, origin_acc
def retrain(model, args, layer_names, selectsize=100, attack='fgsm',
            measure='lsa', datatype='mnist'):
    """Select `selectsize` inputs from the adversarial MNIST pool with
    `measure`, fine-tune `model`, and return (retrain accuracy, original
    accuracy) on the full pool.

    For 'my' / 'my_optimize' the best of five single-epoch fits is kept;
    every other measure trains once for five epochs.

    NOTE(review): `datatype` is accepted but unused — kept for caller
    compatibility.
    """
    # Clean training data, normalised the same way the model was trained.
    (x_train, __), (__, __) = mnist.load_data()
    x_train = x_train.astype("float32").reshape(-1, 28, 28, 1)
    x_train = (x_train / 255.0) - (1.0 - CLIP_MAX)

    # Adversarial pool for the requested attack.
    npzfile = np.load('./adv/data/mnist/mnist_' + attack + '_compound8.npz')
    y_test = npzfile['y_test']
    x_target = npzfile['x_test']

    if measure == 'random':
        x_select, y_select = select_rondom(selectsize, x_target, x_target, y_test)
    elif measure == 'my_optimize':
        x_select, y_select = select_my_optimize(model, selectsize, x_target, y_test)
    elif measure == 'conditional':
        sample_idx = condition.conditional_sample(model, x_target, selectsize)
        x_select, y_select = select_from_index(selectsize, x_target, sample_idx, y_test)
    else:
        # Score-ranked selection paths.
        score_lst = []
        if measure == 'my_after_random':
            # Score only a random 2x-budget subset.
            # NOTE(review): the ranking over the subset is then applied to the
            # full x_target by select_from_large — verify this is intended.
            picked = list(np.random.choice(range(len(x_target)),
                                           replace=False, size=selectsize * 2))
            score_lst = fetch_our_measure(model, x_target[picked])
        elif measure == 'my_entropy':
            score_lst = fetch_our_entropy(model, x_target)
        elif measure == 'lsa':
            score_lst = fetch_lsa(model, x_train, x_target, attack, layer_names, args)
        elif measure == 'dsa':
            score_lst = fetch_dsa(model, x_train, x_target, attack, layer_names, args)
        elif measure == 'my':
            score_lst = fetch_our_measure(model, x_target)
        elif measure == 'adaptive':
            # Scores precomputed offline, one column per input.
            csv_path = "./mnist_finalResults/mnist_" + attack + "_compound8_result.csv"
            frame = pd.read_csv(csv_path, header=None)
            score_lst = [column for column in frame.values.T]
        x_select, y_select = select_from_large(selectsize, x_target, score_lst, y_test)

    y_select = np_utils.to_categorical(y_select, 10)
    y_test = np_utils.to_categorical(y_test, 10)

    # Baseline accuracy before fine-tuning.
    score = model.evaluate(x_target, y_test, verbose=0)
    print('Before retrain, Test accuracy: %.4f' % score[1])
    origin_acc = score[1]

    model.compile(loss='categorical_crossentropy', optimizer="adadelta",
                  metrics=['accuracy'])
    retrain_acc = 0
    if measure in ['my', 'my_optimize']:
        # Keep the best accuracy seen over five single-epoch fits.
        for _ in range(5):
            model.fit(x_select, y_select, batch_size=100, epochs=1,
                      shuffle=True, verbose=1, validation_data=(x_target, y_test))
            score = model.evaluate(x_target, y_test, verbose=0)
            if score[1] > retrain_acc:
                retrain_acc = score[1]
            print('After retrain, Test accuracy: %.4f' % score[1])
    else:
        model.fit(x_select, y_select, batch_size=100, epochs=5,
                  shuffle=True, verbose=1, validation_data=(x_target, y_test))
        retrain_acc = model.evaluate(x_target, y_test, verbose=0)[1]
    return retrain_acc, origin_acc