def conv2d(layer):
    """Convert a Keras Conv2D layer into an LRP-toolbox Convolution module (plus its activation)."""
    h, w = layer.get_config()['kernel_size']
    d = layer.input_shape[-1]
    n = layer.output_shape[-1]
    s0, s1 = layer.get_config()['strides']
    W = layer.get_weights()[0]
    B = layer.get_weights()[1]

    # copy the trained kernel and bias into the LRP-toolbox module
    module = modules.Convolution(filtersize=(h, w, d, n), stride=(s0, s1))
    module.W = W
    module.B = B

    activation_module = get_activation_lrpmodule(layer.activation)
    return module, activation_module
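# Usage sketch (not part of the original code): how conv2d could be used to
# translate a whole trained Keras model into an LRP-toolbox Sequential.
# `keras_to_lrp` is a hypothetical helper introduced here for illustration;
# converters for Dense/pooling layers are assumed to exist alongside conv2d.
def keras_to_lrp(keras_model):
    lrp_layers = []
    for layer in keras_model.layers:
        if layer.__class__.__name__ == 'Conv2D':
            module, activation_module = conv2d(layer)
            lrp_layers.append(module)
            if activation_module is not None:
                lrp_layers.append(activation_module)
        # Dense, pooling, and Flatten layers would be handled here by
        # analogous converters, omitted in this sketch.
    return modules.Sequential(lrp_layers)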
def roar_kar(keep, random=False, train_only=False):
    """Run the ROAR/KAR benchmark: occlude inputs per attribution method, retrain, and record test accuracy."""
    logdir = 'tf_logs/standard/'

    def get_savedir():
        savedir = logdir.replace('tf_logs', 'KAR' if keep else 'ROAR')
        if not os.path.exists(savedir):
            os.makedirs(savedir)
        return savedir

    percentiles = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]
    attribution_methods = ['normal', 'LRP', 'proposed_method']

    if not train_only:
        DNN = model_io.read('../models/MNIST/LeNet-5.nn')
        for v in attribution_methods:
            batch_size = 128
            print("{}: occlusion step started".format(v))
            if random:
                print("{}: random removal".format(v))
                occlude_dataset(DNN=DNN,
                                attribution=v,
                                percentiles=percentiles,
                                random=True,
                                keep=keep,
                                batch_size=batch_size,
                                savedir=get_savedir())
            else:
                print("{}: percentile removal".format(v))
                occlude_dataset(DNN=DNN,
                                attribution=v,
                                percentiles=percentiles,
                                random=False,
                                keep=keep,
                                batch_size=batch_size,
                                savedir=get_savedir())
            print("{}: occlusion step is done".format(v))

    print("recording results")
    ress = {k: [] for k in attribution_methods}
    for run in range(3):
        for v in attribution_methods:
            res = []
            for p in percentiles:
                occdir = get_savedir() + '{}_{}_{}.pickle'.format('{}', v, p)
                occdir_y = get_savedir() + '{}_{}_{}_{}.pickle'.format('{}', v, p, 'label')

                # load the occluded training set and its labels
                data_train = unpickle(occdir.format('train'))
                Xtrain = np.array(data_train)
                Ytrain = np.array(unpickle(occdir_y.format('train')))

                Xtest = data_io.read('../data/MNIST/test_images.npy')
                Ytest = data_io.read('../data/MNIST/test_labels.npy')
                print("check: {}".format(Ytrain.shape))

                # preprocess the test set: scale, reshape to NHWC, pad 28x28 -> 32x32
                Xtest = scale(Xtest)
                Xtest = np.reshape(Xtest, [Xtest.shape[0], 28, 28, 1])
                Xtest = np.pad(Xtest, ((0, 0), (2, 2), (2, 2), (0, 0)),
                               'constant', constant_values=(-1.,))

                # transform numeric test labels to one-hot indicator vectors
                Ix = Ytest[:, 0].astype(int)
                Ytest = np.zeros([Xtest.shape[0], np.unique(Ytest).size])
                Ytest[np.arange(Ytest.shape[0]), Ix] = 1
                print(occdir)

                # build a fresh LeNet-5-style network for every occluded dataset
                DNN = modules.Sequential([
                    modules.Convolution(filtersize=(5, 5, 1, 10), stride=(1, 1)),
                    modules.Rect(),
                    modules.SumPool(pool=(2, 2), stride=(2, 2)),
                    modules.Convolution(filtersize=(5, 5, 10, 25), stride=(1, 1)),
                    modules.Rect(),
                    modules.SumPool(pool=(2, 2), stride=(2, 2)),
                    modules.Convolution(filtersize=(4, 4, 25, 100), stride=(1, 1)),
                    modules.Rect(),
                    modules.SumPool(pool=(2, 2), stride=(2, 2)),
                    modules.Convolution(filtersize=(1, 1, 100, 10), stride=(1, 1)),
                    modules.Flatten()
                ])

                print("training...")
                DNN.train(X=Xtrain,
                          Y=Ytrain,
                          Xval=Xtest,
                          Yval=Ytest,
                          iters=10**5,
                          lrate=0.0001,
                          batchsize=128)

                acc = np.mean(np.argmax(DNN.forward(Xtest), axis=1) == np.argmax(Ytest, axis=1))
                del DNN
                print('retrained model test accuracy: {:0.4f}'.format(acc))
                res.append(acc)

            print("end of training for run {}".format(run))
            ress[v].append(res)

    # average the accuracy curves over the repeated runs
    print("averaging over runs...")
    res_mean = {k: np.mean(v, axis=0) for k, v in ress.items()}
    print(res_mean)
    return res_mean
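# Usage sketch (an assumption, not from the original source): run the benchmark
# in both KAR mode (keep=True keeps the most-attributed pixels) and ROAR mode
# (keep=False removes them), plus a random-occlusion baseline, then compare the
# mean accuracy per percentile returned by roar_kar.
if __name__ == '__main__':
    kar_results = roar_kar(keep=True)                      # KAR: keep top-attributed pixels
    roar_results = roar_kar(keep=False)                    # ROAR: remove top-attributed pixels
    random_baseline = roar_kar(keep=False, random=True)    # random occlusion baseline
    print(kar_results, roar_results, random_baseline)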
Xtest = np.pad(Xtest, ((0, 0), (2, 2), (2, 2), (0, 0)),
               'constant', constant_values=(-1.,))

# transform numeric class labels to indicator vectors
I = Ytrain[:, 0].astype(int)
Ytrain = np.zeros([Xtrain.shape[0], np.unique(Ytrain).size])
Ytrain[np.arange(Ytrain.shape[0]), I] = 1

I = Ytest[:, 0].astype(int)
Ytest = np.zeros([Xtest.shape[0], np.unique(Ytest).size])
Ytest[np.arange(Ytest.shape[0]), I] = 1

# model a network according to the LeNet-5 architecture
lenet = modules.Sequential([
    modules.Convolution(filtersize=(5, 5, 1, 10), stride=(1, 1)),
    modules.Rect(),
    modules.SumPool(pool=(2, 2), stride=(2, 2)),
    modules.Convolution(filtersize=(5, 5, 10, 25), stride=(1, 1)),
    modules.Rect(),
    modules.SumPool(pool=(2, 2), stride=(2, 2)),
    modules.Convolution(filtersize=(4, 4, 25, 100), stride=(1, 1)),
    modules.Rect(),
    modules.SumPool(pool=(2, 2), stride=(2, 2)),
    modules.Convolution(filtersize=(1, 1, 100, 10), stride=(1, 1)),
    modules.Flatten()
])

# train the network
lenet.train(X=Xtrain,
            Y=Ytrain,