import argparse

import datasets
import dnn_utils

if __name__ == "__main__":
    # ol: target classifier is from the adaptive attack
    # kkt: target classifier is from the KKT attack
    # real: actual classifier
    # compare: compare performance of the KKT attack and the adaptive
    # attack using the same stop criterion
    parser = argparse.ArgumentParser()

    # Global params
    parser.add_argument('--model_arch', default='lenet',
                        choices=dnn_utils.get_model_names(),
                        help='Victim model architecture')
    parser.add_argument('--dataset', default='mnist',
                        choices=datasets.get_dataset_names(),
                        help='Which dataset to use?')
    parser.add_argument('--batch_size', default=-1, type=int,
                        help='Batch size while training models')
    parser.add_argument('--online_alg_criteria', default='norm',
                        choices=['max_loss', 'norm'],
                        help='Stop criterion of the online algorithm: max_loss or norm')
    parser.add_argument('--poison_model_path', type=str,
                        help='Path to saved poisoned classifier')
    parser.add_argument('--log_path', type=str, default="./data/logs",
                        help='Directory to write logs to')
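# Sketch: how the --online_alg_criteria choice above might translate into a
# concrete stopping check. This is an illustration only, not code from the
# original script; `theta`, `theta_target`, `max_loss`, and `tol` are all
# hypothetical names.
import numpy as np

def stop_criterion_met(criterion, theta, theta_target, max_loss, tol=1e-4):
    if criterion == 'norm':
        # stop when the current parameters are close to the target parameters
        return np.linalg.norm(theta - theta_target) <= tol
    if criterion == 'max_loss':
        # stop when the worst-case loss gap falls below the tolerance
        return max_loss <= tol
    raise ValueError(f'unknown stop criterion: {criterion}')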
import math
import argparse

import numpy as np
import torch
import torch.utils.data
import torchvision.transforms as transforms

from datasets import create_dataset, get_dataset_names

parser = argparse.ArgumentParser(description='Dataset statistics calculator')
parser.add_argument('data', metavar='DIR',
                    help='path to dataset')
parser.add_argument('--dataset', default='tum',
                    help='dataset type: ' + ' | '.join(get_dataset_names()) + ' (default: tum)')
parser.add_argument('--input-channels', default=3, type=int, dest='input_channels')
parser.add_argument('-b', '--batch-size', default=128, type=int)
parser.add_argument('-j', '--workers', default=8, type=int)

args = parser.parse_args()

# load the dataset
dataset = create_dataset(args.dataset, root_dir=args.data, type='train',
                         input_channels=args.input_channels)
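# The statistics computation itself is truncated above; the following is a
# sketch of one common way to finish it, assuming each dataset item yields
# an image tensor of shape (C, H, W) as its first element.
loader = torch.utils.data.DataLoader(dataset, batch_size=args.batch_size,
                                     num_workers=args.workers, shuffle=False)

# accumulate per-channel sums and squared sums over all pixels
channel_sum = torch.zeros(args.input_channels)
channel_sq_sum = torch.zeros(args.input_channels)
n_pixels = 0

for images, *_ in loader:
    b, c, h, w = images.shape
    channel_sum += images.sum(dim=(0, 2, 3))
    channel_sq_sum += (images ** 2).sum(dim=(0, 2, 3))
    n_pixels += b * h * w

mean = channel_sum / n_pixels
std = (channel_sq_sum / n_pixels - mean ** 2).sqrt()
print(f'mean = {mean.tolist()}')
print(f'std  = {std.tolist()}')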
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow_probability as tfp
from tqdm import tqdm

import utils as utl
import models as m
import datasets as d

tfd = tfp.distributions
tfm = tf.math

# Set root of where data is
d.root = '../../datasets/raw/'

#%% Load data
dataset_names = d.get_dataset_names()
name = dataset_names[5]
data, X_train, X_val, X_test = d.load_data(name)
M = X_train.shape[1]

print(f'\nX_train.shape = {X_train.shape}')
print(f'\nX_val.shape = {X_val.shape}')
print(f'\nX_test.shape = {X_test.shape}')
print(name + ' data loaded...')

#%% Parameters
model_name = 'TT'
epochs = 500
N_init = 5  # Number of random initializations to do
batch_size = 1000
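#%% Sketch of how the parameters above are typically used: train the model
# from N_init random initializations and keep the run with the best
# validation loss. This is an illustration, not the repository's actual
# training loop; `m.create_model` and `utl.train_model` are hypothetical
# helper names.
best_val_loss = np.inf
best_model = None

for i in range(N_init):
    model = m.create_model(model_name, M)  # fresh random initialization
    val_loss = utl.train_model(model, X_train, X_val,
                               epochs=epochs, batch_size=batch_size)
    print(f'init {i}: val loss = {val_loss:.4f}')
    if val_loss < best_val_loss:
        best_val_loss = val_loss
        best_model = model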
import argparse

import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms

from models import create_model, get_model_names
from datasets import create_dataset, get_dataset_names, DataLoaderCache

model_names = get_model_names()
dataset_names = get_dataset_names()

#
# parse command-line arguments
#
parser = argparse.ArgumentParser(description='PyTorch OdometryNet Training')
parser.add_argument('data', metavar='DIR',
                    help='path to dataset')
parser.add_argument('--dataset', default='tum',
                    help='dataset type: ' + ' | '.join(dataset_names) + ' (default: tum)')
parser.add_argument('--model-dir', type=str, default='',
                    help='path to desired output directory for saving model '
                         'checkpoints')
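# A sketch (not part of the original file) of how the parsed arguments would
# typically flow into the factory functions imported above; the exact keyword
# arguments of create_dataset/create_model in this repository may differ, and
# 'odometrynet' is a hypothetical model name.
args = parser.parse_args()

train_dataset = create_dataset(args.dataset, root_dir=args.data, type='train')
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=8,
                                           shuffle=True)

model = create_model('odometrynet')
model = model.cuda() if torch.cuda.is_available() else model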