def __init__(self): DataLoader.__init__(self) self.dataset_name = "cifar10" self.n_train = 45000 self.n_val = 5000 self.n_test = 10000 self.seed = Cfg.seed if Cfg.ad_experiment: self.n_classes = 2 else: self.n_classes = 10 Cfg.n_batches = int(np.ceil(self.n_train * 1. / Cfg.batch_size)) self.data_path = "../data/cifar-10-batches-py/" self.on_memory = True Cfg.store_on_gpu = True # load data from disk self.load_data()
def __init__(self):
    DataLoader.__init__(self)

    self.dataset_name = Cfg.dataset
    self.n_train = Cfg.n_train
    self.n_val = Cfg.n_val
    self.n_test = Cfg.n_test
    # self.out_frac = Cfg.out_frac

    self.image_height = Cfg.image_height
    self.image_width = Cfg.image_width
    self.channels = Cfg.channels

    self.seed = Cfg.seed

    if Cfg.ad_experiment:
        self.n_classes = 2
    else:
        self.n_classes = 6  # there are 6 weather types, but this branch should not be used

    Cfg.n_batches = int(np.ceil(self.n_train * 1. / Cfg.batch_size))

    self.data_path = './data/%s' % self.dataset_name  # not currently used

    self.on_memory = True
    Cfg.store_on_gpu = True

    # load data from disk
    self.load_data()
def __init__(self): DataLoader.__init__(self) self.dataset_name = "lhc" self.n_train = 50000 self.n_val = 10000 self.n_test = 10000 self.seed = Cfg.seed if Cfg.ad_experiment: self.n_classes = 2 print("[INFO ]: ", "Configuring experiment for Anomaly Detection [AD]") else: self.n_classes = 10 print("[INFO ]: ", "Configuring experiment as Classification Problem") Cfg.n_batches = int(np.ceil(self.n_train * 1. / float(Cfg.batch_size))) # print("[INFO ]: ", "Current Working Directory",os.getcwd()) self.data_path = "../data/" self.on_memory = True Cfg.store_on_gpu = True # load data from disk self.load_data()
def __init__(self):
    DataLoader.__init__(self)

    self.seed = Cfg.seed
    self.n_classes = 2

    self.on_memory = True
    Cfg.store_on_gpu = False

    # load data from disk
    self.load_dataset_path()
def accumulate(
    training_hparams: hparams.TrainingHparams,
    model: Model,
    train_loader: DataLoader,
    data_order_seed: int = None,
    suffix: str = ''
):
    """Accumulate the gradient for one training epoch.

    Args:
      * training_hparams: The training hyperparameters whose schema is specified in hparams.py.
      * model: The model to train. Must be a models.base.Model.
      * train_loader: The training data. Must be a datasets.base.DataLoader.
      * data_order_seed: The RNG seed for data shuffling.
    """

    # Adapt for FP16.
    if training_hparams.apex_fp16:
        if NO_APEX:
            raise ImportError('Must install nvidia apex to use this model.')
        model = apex.amp.initialize(model, loss_scale='dynamic', verbosity=0)

    # Handle parallelism if applicable.
    if get_platform().is_distributed:
        model = DistributedDataParallel(model, device_ids=[get_platform().rank])
    elif get_platform().is_parallel:
        model = DataParallel(model)

    train_loader.shuffle(data_order_seed)

    for it, (examples, labels) in enumerate(train_loader):
        examples = examples.to(device=get_platform().torch_device)
        labels = labels.to(device=get_platform().torch_device)

        model.eval()
        loss = model.loss_criterion(model(examples), labels)

        # Gradients are never zeroed, so .grad accumulates over the whole epoch.
        # apex.amp.scale_loss() needs an optimizer, and none is constructed in this
        # function, so the loss is backpropagated without FP16 loss scaling.
        loss.backward()

    get_platform().barrier()
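# Hedged usage sketch (not part of the original code). Because accumulate() never zeroes
# gradients, each parameter's .grad ends up holding the gradient summed over the whole
# epoch. One illustrative way to consume that, assuming a SNIP-style |weight * gradient|
# saliency heuristic (an assumption, not necessarily how this codebase uses the
# accumulated gradients):
def _example_saliency_scores(training_hparams, model, train_loader, data_order_seed=0):
    accumulate(training_hparams, model, train_loader, data_order_seed=data_order_seed)
    return {name: (p * p.grad).abs().detach()
            for name, p in model.named_parameters() if p.grad is not None}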
def __init__(self): DataLoader.__init__(self) self.dataset_name = "imagenet" self.n_train = 1281167 self.n_val = 50000 self.n_test = -1 self.n_classes = 1000 Cfg.n_batches = int(np.ceil(self.n_train * 1. / Cfg.batch_size)) self.data_path = data_path self.port_train = port_train self.port_val = port_val self.on_memory = False Cfg.store_on_gpu = False
def __init__(self): DataLoader.__init__(self) self.dataset_name = "mnist" self.n_train = 50000 self.n_val = 10000 self.n_test = 10000 self.n_classes = 10 Cfg.n_batches = int(np.ceil(self.n_train * 1. / Cfg.batch_size)) self.data_path = "../data/" self.on_memory = True Cfg.store_on_gpu = True # load data from disk self.load_data()
def __init__(self): DataLoader.__init__(self) self.dataset_name = "gtsrb" # GTSRB stop sign images (class 14) self.n_train = 780 self.n_val = 0 self.n_test = 290 # 270 normal examples and 20 adversarial examples self.seed = Cfg.seed self.n_classes = 2 self.data_path = "../data/data_gtsrb/" self.on_memory = True Cfg.store_on_gpu = True # load data from disk self.load_data()
def train(training_hparams: hparams.TrainingHparams,
          model: Model,
          train_loader: DataLoader,
          output_location: str,
          callbacks: typing.List[typing.Callable] = [],
          start_step: Step = None,
          end_step: Step = None):
    """The main training loop for this framework.

    Args:
      * training_hparams: The training hyperparameters whose schema is specified in hparams.py.
      * model: The model to train. Must be a models.base.Model.
      * train_loader: The training data. Must be a datasets.base.DataLoader.
      * output_location: The string path where all outputs should be stored.
      * callbacks: A list of functions that are called before each training step and once more
        after the last training step. Each function takes five arguments: the output location,
        the current step, the model, the optimizer, and the logger. Callbacks are used for
        running the test set, saving the logger, saving the state of the model, etc. They
        provide hooks into the training loop for customization so that the training loop
        itself can remain simple.
      * start_step: The step at which the training data and learning rate schedule should
        begin. Defaults to step 0.
      * end_step: The step at which training should cease. Otherwise, training will go for the
        full `training_hparams.training_steps` steps.
    """

    # Create the output location if it doesn't already exist.
    if not get_platform().exists(output_location) and get_platform().is_primary_process:
        get_platform().makedirs(output_location)

    # Get the optimizer and learning rate schedule.
    model.to(get_platform().torch_device)
    optimizer = optimizers.get_optimizer(training_hparams, model)
    step_optimizer = optimizer
    lr_schedule = optimizers.get_lr_schedule(training_hparams, optimizer,
                                             train_loader.iterations_per_epoch)

    # Adapt for FP16.
    if training_hparams.apex_fp16:
        if NO_APEX:
            raise ImportError('Must install nvidia apex to use this model.')
        model, step_optimizer = apex.amp.initialize(model, optimizer,
                                                    loss_scale='dynamic', verbosity=0)

    # Handle parallelism if applicable.
    if get_platform().is_distributed:
        model = DistributedDataParallel(model, device_ids=[get_platform().rank])
    elif get_platform().is_parallel:
        model = DataParallel(model)

    # Get the random seed for the data order.
    data_order_seed = training_hparams.data_order_seed

    # Restore the model from a saved checkpoint if the checkpoint exists.
    cp_step, cp_logger = restore_checkpoint(output_location, model, optimizer,
                                            train_loader.iterations_per_epoch)
    start_step = cp_step or start_step or Step.zero(train_loader.iterations_per_epoch)
    logger = cp_logger or MetricLogger()
    with warnings.catch_warnings():  # Filter unnecessary warning.
        warnings.filterwarnings("ignore", category=UserWarning)
        for _ in range(start_step.iteration):
            lr_schedule.step()

    # Determine when to end training.
    end_step = end_step or Step.from_str(training_hparams.training_steps,
                                         train_loader.iterations_per_epoch)
    if end_step <= start_step:
        return

    # The training loop.
    for ep in range(start_step.ep, end_step.ep + 1):

        # Ensure the data order is different for each epoch.
        train_loader.shuffle(None if data_order_seed is None else (data_order_seed + ep))

        for it, (examples, labels) in enumerate(train_loader):

            # Advance the data loader until the start epoch and iteration.
            if ep == start_step.ep and it < start_step.it:
                continue

            # Run the callbacks.
            step = Step.from_epoch(ep, it, train_loader.iterations_per_epoch)
            for callback in callbacks:
                callback(output_location, step, model, optimizer, logger)

            # Exit at the end step.
            if ep == end_step.ep and it == end_step.it:
                return

            # Otherwise, train.
            examples = examples.to(device=get_platform().torch_device)
            labels = labels.to(device=get_platform().torch_device)

            step_optimizer.zero_grad()
            model.train()
            loss = model.loss_criterion(model(examples), labels)
            if training_hparams.apex_fp16:
                with apex.amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()

            # Step forward. Ignore extraneous warnings that the lr_schedule generates.
            step_optimizer.step()
            with warnings.catch_warnings():  # Filter unnecessary warning.
                warnings.filterwarnings("ignore", category=UserWarning)
                lr_schedule.step()

    get_platform().barrier()
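# Hedged usage sketch (not part of the original code). The callback below follows the
# signature that train() actually uses: callback(output_location, step, model, optimizer,
# logger). The commented call is illustrative only; `my_hparams`, `my_model`, and
# `my_loader` are hypothetical placeholders.
def _example_print_callback(output_location, step, model, optimizer, logger):
    # Runs before every training step and once more after the last step.
    if step.it == 0:
        print('Starting epoch {} (outputs in {})'.format(step.ep, output_location))

# train(my_hparams, my_model, my_loader, output_location='runs/example',
#       callbacks=[_example_print_callback])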
def distill(
    training_hparams: hparams.TrainingHparams,
    distill_hparams: hparams.DistillHparams,
    student: Model,
    teacher: Model,
    train_loader: DataLoader,
    output_location: str,
    callbacks: typing.List[typing.Callable] = [],
    start_step: Step = None,
    end_step: Step = None
):
    """The main knowledge-distillation training loop for this framework.

    Args:
      * training_hparams: The training hyperparameters whose schema is specified in hparams.py.
      * distill_hparams: The knowledge distillation hyperparameters whose schema is specified
        in hparams.py.
      * student: The student model to train. Must be a models.base.Model.
      * teacher: The teacher model to distill knowledge from. Must be a models.base.Model.
      * train_loader: The training data. Must be a datasets.base.DataLoader.
      * output_location: The string path where all outputs should be stored.
      * callbacks: A list of functions that are called before each training step and once more
        after the last training step. Each function takes five arguments: the output location,
        the current step, the student model, the optimizer, and the logger. Callbacks are used
        for running the test set, saving the logger, saving the state of the model, etc. They
        provide hooks into the training loop for customization so that the training loop
        itself can remain simple.
      * start_step: The step at which the training data and learning rate schedule should
        begin. Defaults to step 0.
      * end_step: The step at which training should cease. Otherwise, training will go for the
        full `training_hparams.training_steps` steps.
    """
    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    # Create the output location if it doesn't already exist.
    if not get_platform().exists(output_location) and get_platform().is_primary_process:
        get_platform().makedirs(output_location)

    # Get the optimizer and learning rate schedule.
    student.to(get_platform().torch_device)
    teacher.to(get_platform().torch_device)
    optimizer = optimizers.get_optimizer(training_hparams, student)
    step_optimizer = optimizer
    lr_schedule = optimizers.get_lr_schedule(training_hparams, optimizer,
                                             train_loader.iterations_per_epoch)

    # Distillation loss functions.
    ce_loss_fct = nn.KLDivLoss(reduction="batchmean")
    if distill_hparams.alpha_mse > 0.0:
        mse_loss_fct = nn.MSELoss(reduction='sum')
    if distill_hparams.alpha_cos > 0.0:
        cos_loss_fct = nn.CosineEmbeddingLoss(reduction='mean')

    # Adapt for FP16.
    if training_hparams.apex_fp16:
        if NO_APEX:
            raise ImportError('Must install nvidia apex to use this model.')
        (student, teacher), step_optimizer = apex.amp.initialize(
            [student, teacher], optimizer, loss_scale='dynamic', verbosity=0
        )

    # Handle parallelism if applicable.
    if get_platform().is_distributed:
        student = DistributedDataParallel(student, device_ids=[get_platform().rank])
        teacher = DistributedDataParallel(teacher, device_ids=[get_platform().rank])
    elif get_platform().is_parallel:
        student = DataParallel(student)
        teacher = DataParallel(teacher)

    # Get the random seed for the data order.
    data_order_seed = training_hparams.data_order_seed

    # Restore the model from a saved checkpoint if the checkpoint exists.
    cp_step, cp_logger = restore_checkpoint(output_location, student, optimizer,
                                            train_loader.iterations_per_epoch)
    start_step = cp_step or start_step or Step.zero(train_loader.iterations_per_epoch)
    logger = cp_logger or MetricLogger()
    with warnings.catch_warnings():  # Filter unnecessary warning.
        warnings.filterwarnings("ignore", category=UserWarning)
        for _ in range(start_step.iteration):
            lr_schedule.step()

    # Determine when to end training.
    end_step = end_step or Step.from_str(training_hparams.training_steps,
                                         train_loader.iterations_per_epoch)
    if end_step <= start_step:
        return

    # The training loop.
    for ep in range(start_step.ep, end_step.ep + 1):

        # Ensure the data order is different for each epoch.
        train_loader.shuffle(None if data_order_seed is None else (data_order_seed + ep))

        for it, (examples, labels) in enumerate(train_loader):

            # Advance the data loader until the start epoch and iteration.
            if ep == start_step.ep and it < start_step.it:
                continue

            # Run the callbacks.
            step = Step.from_epoch(ep, it, train_loader.iterations_per_epoch)
            for callback in callbacks:
                callback(output_location, step, student, optimizer, logger)

            # Exit at the end step.
            if ep == end_step.ep and it == end_step.it:
                return

            # Otherwise, train.
            examples = examples.to(device=get_platform().torch_device)
            labels = labels.to(device=get_platform().torch_device)

            loss = 0.0
            step_optimizer.zero_grad()
            student.train()
            teacher.eval()

            student_outputs = student(examples)
            with torch.no_grad():
                teacher_outputs = teacher(examples)

            s_logits = student_outputs
            t_logits = teacher_outputs

            # KL divergence loss for the knowledge distillation.
            loss_ce = ce_loss_fct(
                F.log_softmax(s_logits / distill_hparams.temperature, dim=-1),
                F.softmax(t_logits / distill_hparams.temperature, dim=-1),
            ) * distill_hparams.temperature ** 2
            loss += distill_hparams.alpha_ce * loss_ce

            if distill_hparams.alpha_cls > 0.0:
                loss_cls = student.loss_criterion(student_outputs, labels)
                loss += distill_hparams.alpha_cls * loss_cls

            if distill_hparams.alpha_mse > 0.0:
                loss_mse = mse_loss_fct(s_logits, t_logits) / s_logits.size(0)
                loss += distill_hparams.alpha_mse * loss_mse

            if training_hparams.apex_fp16:
                with apex.amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()

            # Step forward. Ignore extraneous warnings that the lr_schedule generates.
            step_optimizer.step()
            with warnings.catch_warnings():  # Filter unnecessary warning.
                warnings.filterwarnings("ignore", category=UserWarning)
                lr_schedule.step()

    get_platform().barrier()
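# Hedged illustration (not part of the original code) of the softened KL-divergence term
# used in the loop above: KL(softmax(t / T) || softmax(s / T)) scaled by T^2, as in
# standard Hinton-style knowledge distillation.
def _example_kd_term(s_logits, t_logits, temperature=2.0):
    import torch.nn as nn
    import torch.nn.functional as F
    kd = nn.KLDivLoss(reduction="batchmean")
    return kd(F.log_softmax(s_logits / temperature, dim=-1),
              F.softmax(t_logits / temperature, dim=-1)) * temperature ** 2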
def grasp(
    training_hparams: hparams.TrainingHparams,
    model: Model,
    parameter_list: list,
    train_loader: DataLoader,
    data_order_seed: int = None,
    suffix: str = ''
):
    """For the implementation of GraSP (Gradient Signal Preservation).

    Args:
      * training_hparams: The training hyperparameters whose schema is specified in hparams.py.
      * model: The model to train. Must be a models.base.Model.
      * parameter_list: The parameters with respect to which the GraSP gradients are computed.
      * train_loader: The training data. Must be a datasets.base.DataLoader.
      * data_order_seed: The RNG seed for data shuffling.
    """

    # Adapt for FP16.
    if training_hparams.apex_fp16:
        if NO_APEX:
            raise ImportError('Must install nvidia apex to use this model.')
        model = apex.amp.initialize(model, loss_scale='dynamic', verbosity=0)

    # Handle parallelism if applicable.
    if get_platform().is_distributed:
        model = DistributedDataParallel(model, device_ids=[get_platform().rank])
    elif get_platform().is_parallel:
        model = DataParallel(model)

    train_loader.shuffle(data_order_seed)

    # First gradient vector, without the computational graph.
    stopped_grads = 0
    for it, (examples, labels) in enumerate(train_loader):
        examples = examples.to(device=get_platform().torch_device)
        labels = labels.to(device=get_platform().torch_device)

        model.eval()
        output = model(examples) / 200.0  # temp = 200
        loss = model.loss_criterion(output, labels)
        # if training_hparams.apex_fp16:
        #     with apex.amp.scale_loss(loss, optimizer) as scaled_loss:
        #         scaled_loss.backward()
        # else:
        #     loss.backward()

        grads = torch.autograd.grad(loss, parameter_list, create_graph=False)
        flatten_grads = torch.cat([g.reshape(-1) for g in grads if g is not None])
        stopped_grads += flatten_grads

    train_loader.shuffle(None if data_order_seed is None else (data_order_seed + 1))

    # Second gradient vector, with the computational graph.
    for it, (examples, labels) in enumerate(train_loader):
        examples = examples.to(device=get_platform().torch_device)
        labels = labels.to(device=get_platform().torch_device)

        model.eval()
        output = model(examples) / 200.0  # temp = 200
        loss = model.loss_criterion(output, labels)
        # if training_hparams.apex_fp16:
        #     with apex.amp.scale_loss(loss, optimizer) as scaled_loss:
        #         scaled_loss.backward()
        # else:
        #     loss.backward()

        grads = torch.autograd.grad(loss, parameter_list, create_graph=True)
        flatten_grads = torch.cat([g.reshape(-1) for g in grads if g is not None])

        # Backpropagating through this inner product accumulates an estimate of the
        # Hessian-gradient product into each parameter's .grad.
        gnorm = (stopped_grads * flatten_grads).sum()
        gnorm.backward()

    get_platform().barrier()
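# Hedged usage sketch (an assumption, not part of the original code). After grasp() runs,
# gnorm.backward() has left an estimate of the Hessian-gradient product Hg in each
# parameter's .grad, so a GraSP-style per-weight score would be -w * Hg (weights with the
# lowest scores are the usual pruning candidates in the GraSP paper).
def _example_grasp_scores(parameter_list):
    return [-(p * p.grad).detach() for p in parameter_list if p.grad is not None]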