# NOTE(review): this line was an entire script collapsed onto one physical line
# (paste/extraction damage). Reformatted for readability; tokens unchanged.
# The script is truncated here — the "__main__" block continues beyond this view.
import load_data as ld
import numpy as np
import random
from pathlib import Path
import torch
import setproctitle
import time
import math
import os

if __name__ == "__main__":
    # Experiment configuration: MNIST under a euclidean-metric perturbation
    # budget epsilon, with significance level alpha and slack delta.
    # (Semantics of epsilon/alpha/delta are defined by ld.argparser — confirm there.)
    args = ld.argparser(dataset='mnist', metric='euclidean', epsilon=1.58,
                        alpha=0.01, delta=0.25)
    # args = ld.argparser(dataset='cifar', metric='euclidean', epsilon=0.2453, alpha=0.05, delta=0)
    # Rename the process so it is identifiable in `ps`/`top`.
    setproctitle.setproctitle('python')
    # Echo the run configuration to stdout.
    print('dataset: {dataset}\t\t'
          'metric: {metric}\t\t'
          'epsilon: {epsilon}\t\t'
          'alpha: {alpha}\t\t'
          'delta: {delta}\t\t'.format(dataset=args.dataset,
                                      metric=args.metric,
                                      epsilon=args.epsilon,
                                      alpha=args.alpha,
                                      delta=args.delta))
    #### choose to use gpu if available
        # NOTE(review): this fragment begins mid-function — the enclosing `def`
        # and the try/except around the scipy import are outside this view, so
        # the indentation here is reconstructed (TODO confirm against the full file).
        raise ImportError("The 'scipy' module could not be loaded. "
                          + "It is required for the 'brute_force' method "
                          + "for building a knn similarity graph.")
    # Condensed pairwise distances over all rows of X, expanded to a full
    # square distance matrix D.
    d = _spd.pdist(X, metric=metric)
    D = _spd.squareform(d)
    # Rank neighbours per row by distance; the first k columns are the
    # k nearest neighbours of each sample.
    rank = np.argsort(D, axis=1)
    neighbors = rank[:, 0:k]
    # Index of each sample's k-th (farthest retained) neighbour; its distance
    # is the radius of that sample's kNN ball. (`n` is defined earlier in the
    # unseen part of this function — presumably the number of samples.)
    k_nbr = neighbors[:, -1]
    radii = D[np.arange(n), k_nbr]
    return neighbors, radii


if __name__ == "__main__":
    # Preliminary kNN-graph experiment on MNIST with the infinity metric.
    args = ld.argparser(dataset='mnist', metric='infinity', k=50)
    # args = ld.argparser(dataset='cifar', metric='euclidean', alpha=0.05)
    setproctitle.setproctitle('preliminary')
    #### load the datasets
    if args.dataset == 'mnist':
        train_loader, _, valid_loader = ld.mnist_loaders(path='./data/' + args.dataset,
                                                         seed=args.seed,
                                                         ratio=args.ratio)
        # Flatten 28x28 images to 784-d vectors. Note: only the final batch's
        # data survives the loop as written — presumably the loader yields one
        # full batch; verify against ld.mnist_loaders.
        for i, (X, y) in enumerate(train_loader):
            train_data = X.view(-1, 28 * 28).numpy()
    elif args.dataset == 'fmnist':
        # NOTE(review): fragment truncated here — the fmnist branch continues
        # beyond this view.
        train_loader, _, valid_loader = ld.fashion_mnist_loaders(
            path='./data/' + args.dataset, seed=args.seed, ratio=args.ratio)
#### #### strategy: cover the top-q densest images with union of rectangles as the robust region, #### then expand them by epsilon to obtain the error region (rects-complement). #### import load_data as ld import numpy as np import random from pathlib import Path import setproctitle import os if __name__ == "__main__": args = ld.argparser(dataset='mnist', metric='infinity', epsilon=0.3, q=0.703, clusters=10) # args = ld.argparser(dataset='cifar', metric='infinity', epsilon=0.007843, q=0.683, clusters=10) setproctitle.setproctitle('python') print('dataset: {dataset}\t\t' 'metric: {metric}\t\t' 'epsilon: {epsilon}\t\t' 'k: {k}\t\t' 'q: {q}\t\t' '#clusters: {clusters}\t\t' '#iter: {iter}\t\t' '#repeat: {repeat}'.format(dataset=args.dataset, metric=args.metric, epsilon=args.epsilon,
#### l2-norm bounded perturbations #### tune the number of clusters for the given epsilon using binary search over q import load_data as ld import numpy as np import random from pathlib import Path import setproctitle import os if __name__ == "__main__": args = ld.argparser(dataset='mnist', metric='infinity', epsilon=0.3, alpha=0.01) # args = ld.argparser(dataset='cifar', metric='infinity', epsilon=0.007843, alpha=0.05) setproctitle.setproctitle('python') print('dataset: {dataset}\t\t' 'metric: {metric}\t\t' 'epsilon: {epsilon}\t\t' 'alpha: {alpha}\t\t' 'k: {k}\t\t' '#iter: {iter}\t\t'.format( dataset=args.dataset, metric=args.metric, epsilon=args.epsilon, alpha=args.alpha, k=args.k, iter=args.iter)) #### define results path res_filepath = (os.path.dirname('./results/coarse_tune/'+args.proctitle)+'/alpha_'+str(args.alpha)) print("saving file to {}".format(res_filepath+'/epsilon_'+str(args.epsilon))) if not os.path.exists(res_filepath): os.makedirs(res_filepath) #### load the datasets if args.dataset == 'mnist': train_loader, test_loader, valid_loader = ld.mnist_loaders(path='./data/'+args.dataset, seed=args.seed,