saver = tf.train.Saver(max_to_keep=5)
tf.summary.scalar('accuracy adv train', model.accuracy)
tf.summary.scalar('accuracy adv', model.accuracy)
tf.summary.scalar('xent adv train', model.xent / batch_size)
tf.summary.scalar('xent adv', model.xent / batch_size)
tf.summary.image('images adv train', model.x_input)
merged_summaries = tf.summary.merge_all()

# keep the configuration file with the model for reproducibility
shutil.copy('config.json', model_dir)

with tf.Session() as sess:
    # initialize data augmentation
    cifar = cifar10_input.AugmentedCIFAR10Data(raw_cifar, sess, model)

    # Initialize the summary writer, global variables, and our time counter.
    summary_writer = tf.summary.FileWriter(model_dir, sess.graph)
    sess.run(tf.global_variables_initializer())
    training_time = 0.0

    # Main training loop
    for ii in range(max_num_training_steps):
        x_batch, y_batch = cifar.train_data.get_next_batch(
            batch_size, multiple_passes=True)

        # Compute Adversarial Perturbations
        start = timer()
        end = timer()
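# The fragment above starts and stops its timer without doing any work in
# between; the complete loop appears in the next function. As a minimal,
# self-contained illustration of the throughput bookkeeping these scripts
# share (training_time accumulates timed work, and every num_output_steps
# iterations an examples-per-second figure is printed), here is a hedged
# sketch; the placeholder computation stands in for the real training step:
from timeit import default_timer as timer

def report_throughput(num_output_steps=100, batch_size=128, num_steps=1000):
    training_time = 0.0
    for ii in range(num_steps):
        start = timer()
        sum(x * x for x in range(10000))  # placeholder for the real work
        training_time += timer() - start
        if ii % num_output_steps == 0 and ii != 0:
            print('{:.1f} examples per second'.format(
                num_output_steps * batch_size / training_time))
            training_time = 0.0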
def train(config):
    # seeding randomness
    tf.set_random_seed(config.training.tf_random_seed)
    np.random.seed(config.training.np_random_seed)

    # Setting up training parameters
    max_num_training_steps = config.training.max_num_training_steps
    step_size_schedule = config.training.step_size_schedule
    weight_decay = config.training.weight_decay
    momentum = config.training.momentum
    batch_size = config.training.batch_size
    adversarial_training = config.training.adversarial_training
    eval_during_training = config.training.eval_during_training
    if eval_during_training:
        num_eval_steps = config.training.num_eval_steps

    # Setting up output parameters
    num_output_steps = config.training.num_output_steps
    num_summary_steps = config.training.num_summary_steps
    num_checkpoint_steps = config.training.num_checkpoint_steps

    # Setting up the data and the model
    data_path = config.data.data_path
    raw_cifar = cifar10_input.CIFAR10Data(data_path)
    global_step = tf.contrib.framework.get_or_create_global_step()
    model = resnet.Model(config.model)

    # uncomment to get a list of trainable variables
    # model_vars = tf.trainable_variables()
    # slim.model_analyzer.analyze_vars(model_vars, print_info=True)

    # Setting up the optimizer
    boundaries = [int(sss[0]) for sss in step_size_schedule]
    boundaries = boundaries[1:]
    values = [sss[1] for sss in step_size_schedule]
    learning_rate = tf.train.piecewise_constant(
        tf.cast(global_step, tf.int32), boundaries, values)
    total_loss = model.mean_xent + weight_decay * model.weight_decay_loss
    optimizer = tf.train.MomentumOptimizer(learning_rate, momentum)
    train_step = optimizer.minimize(total_loss, global_step=global_step)

    # Set up adversary
    attack = SpatialAttack(model, config.attack)

    # Setting up the Tensorboard and checkpoint outputs
    model_dir = config.model.output_dir
    if eval_during_training:
        eval_dir = os.path.join(model_dir, 'eval')
        if not os.path.exists(eval_dir):
            os.makedirs(eval_dir)

    # We add accuracy and xent twice so we can easily make three types of
    # comparisons in Tensorboard:
    # - train vs eval (for a single run)
    # - train of different runs
    # - eval of different runs
    saver = tf.train.Saver(max_to_keep=3)
    tf.summary.scalar('accuracy_adv_train', model.accuracy, collections=['adv'])
    tf.summary.scalar('accuracy_adv', model.accuracy, collections=['adv'])
    tf.summary.scalar('xent_adv_train', model.xent / batch_size,
                      collections=['adv'])
    tf.summary.scalar('xent_adv', model.xent / batch_size, collections=['adv'])
    tf.summary.image('images_adv_train', model.x_image, collections=['adv'])
    adv_summaries = tf.summary.merge_all('adv')

    tf.summary.scalar('accuracy_nat_train', model.accuracy, collections=['nat'])
    tf.summary.scalar('accuracy_nat', model.accuracy, collections=['nat'])
    tf.summary.scalar('xent_nat_train', model.xent / batch_size,
                      collections=['nat'])
    tf.summary.scalar('xent_nat', model.xent / batch_size, collections=['nat'])
    tf.summary.image('images_nat_train', model.x_image, collections=['nat'])
    tf.summary.scalar('learning_rate', learning_rate, collections=['nat'])
    nat_summaries = tf.summary.merge_all('nat')

    os.environ["CUDA_VISIBLE_DEVICES"] = "1"
    gpu_options = tf.GPUOptions(allow_growth=True)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        # initialize data augmentation
        if config.training.data_augmentation:
            cifar = cifar10_input.AugmentedCIFAR10Data(raw_cifar, sess)
        else:
            cifar = raw_cifar

        # Initialize the summary writer, global variables, and our time counter.
        summary_writer = tf.summary.FileWriter(model_dir, sess.graph)
        if eval_during_training:
            eval_summary_writer = tf.summary.FileWriter(eval_dir)
        sess.run(tf.global_variables_initializer())
        training_time = 0.0

        # Main training loop
        for ii in range(max_num_training_steps + 1):
            x_batch, y_batch = cifar.train_data.get_next_batch(
                batch_size, multiple_passes=True)
            noop_trans = np.zeros([len(x_batch), 3])

            # Compute Adversarial Perturbations
            if adversarial_training:
                start = timer()
                x_batch_adv, adv_trans = attack.perturb(x_batch, y_batch, sess)
                end = timer()
                training_time += end - start
            else:
                x_batch_adv, adv_trans = x_batch, noop_trans

            nat_dict = {
                model.x_input: x_batch,
                model.y_input: y_batch,
                model.transform: noop_trans,
                model.is_training: False
            }
            adv_dict = {
                model.x_input: x_batch_adv,
                model.y_input: y_batch,
                model.transform: adv_trans,
                model.is_training: False
            }

            # Output to stdout
            if ii % num_output_steps == 0:
                nat_acc = sess.run(model.accuracy, feed_dict=nat_dict)
                adv_acc = sess.run(model.accuracy, feed_dict=adv_dict)
                print('Step {}: ({})'.format(ii, datetime.now()))
                print('    training nat accuracy {:.4}%'.format(nat_acc * 100))
                print('    training adv accuracy {:.4}%'.format(adv_acc * 100))
                if ii != 0:
                    print('    {} examples per second'.format(
                        num_output_steps * batch_size / training_time))
                    training_time = 0.0

            # Tensorboard summaries
            if ii % num_summary_steps == 0:
                summary = sess.run(adv_summaries, feed_dict=adv_dict)
                summary_writer.add_summary(summary, global_step.eval(sess))
                summary = sess.run(nat_summaries, feed_dict=nat_dict)
                summary_writer.add_summary(summary, global_step.eval(sess))

            # Write a checkpoint
            if ii % num_checkpoint_steps == 0:
                saver.save(sess,
                           os.path.join(model_dir, 'checkpoint'),
                           global_step=global_step)

            if eval_during_training and ii % num_eval_steps == 0:
                evaluate(model, attack, sess, config, eval_summary_writer)

            # Actual training step
            start = timer()
            if adversarial_training:
                adv_dict[model.is_training] = True
                sess.run(train_step, feed_dict=adv_dict)
            else:
                nat_dict[model.is_training] = True
                sess.run(train_step, feed_dict=nat_dict)
            end = timer()
            training_time += end - start
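# train(config) above only reads the config fields listed below. This is a
# hedged, illustrative skeleton of such a config: every value here is a
# placeholder, and the 'model' and 'attack' sub-dicts are consumed opaquely
# by resnet.Model and SpatialAttack, so their exact contents depend on those
# classes rather than on anything shown in this section.
example_config = {
    'training': {
        'tf_random_seed': 451760341,
        'np_random_seed': 216105420,
        'max_num_training_steps': 80000,
        'step_size_schedule': [[0, 0.1], [40000, 0.01], [60000, 0.001]],
        'weight_decay': 0.0002,
        'momentum': 0.9,
        'batch_size': 128,
        'adversarial_training': True,
        'eval_during_training': True,
        'num_eval_steps': 1000,
        'num_output_steps': 100,
        'num_summary_steps': 100,
        'num_checkpoint_steps': 1000,
        'data_augmentation': True,
    },
    'data': {'data_path': '/path/to/cifar10'},
    'model': {'output_dir': 'output/example_run'},  # plus resnet-specific fields
    'attack': {},                                   # SpatialAttack-specific fields
}
# e.g.: config = utilities.config_to_namedtuple(example_config); train(config)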
def train(tf_seed, np_seed, train_steps, out_steps, summary_steps,
          checkpoint_steps, step_size_schedule, weight_decay, momentum,
          train_batch_size, epsilon, replay_m, model_dir, dataset,
          poison_alpha, poison_config, **kwargs):
    tf.compat.v1.set_random_seed(tf_seed)
    np.random.seed(np_seed)
    print('poison alpha = %f' % poison_alpha)

    model_dir = model_dir + '%s_m%d_eps%.1f_b%d' % (
        dataset, replay_m, epsilon, train_batch_size)  # TODO Replace with not defaults

    # Setting up the data and the model
    poison_config_dict = utilities.config_to_namedtuple(
        utilities.get_config(poison_config))
    print(poison_config_dict)
    data_path = get_path_dir(dataset=dataset, **kwargs)
    if dataset == 'cifar10':
        raw_data = cifar10_input.CIFAR10Data(data_path)
    elif dataset == 'cifar10_poisoned':
        raw_data = dataset_input.CIFAR10Data(poison_config_dict, seed=np_seed)
    else:
        raw_data = cifar100_input.CIFAR100Data(data_path)
    global_step = tf.compat.v1.train.get_or_create_global_step()
    with tpu_strategy.scope():
        model = Model(mode='train',
                      dataset=dataset,
                      train_batch_size=train_batch_size)

    # Setting up the optimizer
    boundaries = [int(sss[0]) for sss in step_size_schedule][1:]
    values = [sss[1] for sss in step_size_schedule]
    learning_rate = tf.compat.v1.train.piecewise_constant(
        tf.cast(global_step, tf.int32), boundaries, values)
    optimizer = tf.compat.v1.train.MomentumOptimizer(learning_rate, momentum)

    # Optimizing computation
    total_loss = model.mean_xent + weight_decay * model.weight_decay_loss
    grads = optimizer.compute_gradients(total_loss)

    # Compute new image: ascend the persistent perturbation by a signed step
    # of size epsilon and clip it back into the epsilon-ball
    pert_grad = [g for g, v in grads if 'perturbation' in v.name]
    sign_pert_grad = tf.sign(pert_grad[0])
    new_pert = model.pert + epsilon * sign_pert_grad
    clip_new_pert = tf.clip_by_value(new_pert, -epsilon, epsilon)
    assigned = tf.compat.v1.assign(model.pert, clip_new_pert)

    # Train: apply the same gradients to the model weights, but zero out the
    # gradient on the perturbation variable itself
    no_pert_grad = [(tf.zeros_like(v), v) if 'perturbation' in v.name else (g, v)
                    for g, v in grads]
    with tf.control_dependencies([assigned]):
        min_step = optimizer.apply_gradients(no_pert_grad,
                                             global_step=global_step)
    tf.compat.v1.initialize_variables([model.pert])  # TODO: Removed from TF

    # Setting up the Tensorboard and checkpoint outputs
    if not os.path.exists(model_dir):
        os.makedirs(model_dir)
    saver = tf.compat.v1.train.Saver(max_to_keep=1)
    tf.compat.v1.summary.scalar('accuracy', model.accuracy)
    tf.compat.v1.summary.scalar('xent', model.xent / train_batch_size)
    tf.compat.v1.summary.scalar('total loss', total_loss / train_batch_size)
    merged_summaries = tf.compat.v1.summary.merge_all()

    gpu_options = tf.compat.v1.GPUOptions(per_process_gpu_memory_fraction=1.0)
    with tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(
            gpu_options=gpu_options)) as sess:
        print('\n\n********** free training for epsilon=%.1f using m_replay=%d '
              '**********\n\n' % (epsilon, replay_m))
        print('important params >>> \n model dir: %s \n dataset: %s \n '
              'training batch size: %d \n' % (model_dir, dataset,
                                              train_batch_size))
        if dataset == 'cifar100':
            print('the ride for CIFAR100 is bumpy -- fasten your seatbelts! \n'
                  'you will probably see the training and validation accuracy '
                  'fluctuating a lot early in training \n'
                  'this is natural especially for large replay_m values '
                  'because we see that mini-batch so many times.')

        # initialize data augmentation
        if dataset == 'cifar10':
            data = cifar10_input.AugmentedCIFAR10Data(raw_data, sess, model)
        elif dataset == 'cifar10_poisoned':
            data = raw_data
        else:
            data = cifar100_input.AugmentedCIFAR100Data(raw_data, sess, model)

        # Initialize the summary writers and global variables.
        summary_writer = tf.compat.v1.summary.FileWriter(
            model_dir + '/train', sess.graph)
        eval_summary_writer = tf.compat.v1.summary.FileWriter(
            model_dir + '/eval')
        sess.run(tf.compat.v1.global_variables_initializer())

        # Main training loop
        for ii in range(train_steps):
            # a fresh minibatch is drawn only every replay_m steps; in between,
            # the same batch is replayed ("free" adversarial training)
            if ii % replay_m == 0:
                x_batch, y_batch = data.train_data.get_next_batch(
                    train_batch_size, multiple_passes=True)
                nat_dict = {model.x_input: x_batch, model.y_input: y_batch}

                x_eval_batch, y_eval_batch = data.eval_data.get_next_batch(
                    train_batch_size, multiple_passes=True)
                eval_dict = {
                    model.x_input: x_eval_batch,
                    model.y_input: y_eval_batch
                }

            # Tensorboard summaries and output to stdout
            if ii % summary_steps == 0:
                train_acc, summary = sess.run(
                    [model.accuracy, merged_summaries], feed_dict=nat_dict)
                summary_writer.add_summary(summary, global_step.eval(sess))
                val_acc, summary = sess.run([model.accuracy, merged_summaries],
                                            feed_dict=eval_dict)
                eval_summary_writer.add_summary(summary,
                                                global_step.eval(sess))
                print('Step {}: ({})'.format(ii, datetime.now()))
                print('    training nat accuracy {:.4}% -- validation nat '
                      'accuracy {:.4}%'.format(train_acc * 100, val_acc * 100))
                sys.stdout.flush()
            # Output to stdout only
            elif ii % out_steps == 0:
                nat_acc = sess.run(model.accuracy, feed_dict=nat_dict)
                print('Step {}: ({})'.format(ii, datetime.now()))
                print('    training nat accuracy {:.4}%'.format(nat_acc * 100))

            # Write a checkpoint
            if (ii + 1) % checkpoint_steps == 0:
                saver.save(sess,
                           os.path.join(model_dir, 'checkpoint'),
                           global_step=global_step)

            # Actual training step
            sess.run(min_step, feed_dict=nat_dict)
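# The graph ops above implement "free" adversarial training: a single backward
# pass per replay both ascends a persistent perturbation (a sign step of size
# epsilon, clipped back into the epsilon-ball) and descends the model weights.
# A minimal NumPy sketch of that coupled update on a toy logistic-regression
# model; the model, data, and step sizes here are illustrative placeholders,
# not the Model class used above:
import numpy as np

def free_train_toy(x, y, epsilon=8.0, replay_m=4, lr=0.01, steps=100):
    rng = np.random.default_rng(0)
    w = rng.normal(scale=0.01, size=x.shape[1])
    pert = np.zeros_like(x)                       # persistent perturbation
    for _ in range(steps):
        for _ in range(replay_m):                 # replay the same minibatch m times
            logits = (x + pert) @ w
            p = 1.0 / (1.0 + np.exp(-logits))
            grad_logits = p - y                   # d xent / d logits
            grad_w = (x + pert).T @ grad_logits / len(y)
            grad_pert = np.outer(grad_logits, w)  # d xent / d input
            # ascend the perturbation, clipped to the epsilon-ball
            pert = np.clip(pert + epsilon * np.sign(grad_pert),
                           -epsilon, epsilon)
            # descend the weights using the same backward pass
            w -= lr * grad_w
    return w, pert

w, pert = free_train_toy(np.random.randn(32, 10), np.random.randint(0, 2, 32))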
def train(config='configs/fannyconfig.json', save_root_path='/cluster/work/math/fanyang-broglil/CoreRepo', experiment_json_fname='experiments.json', local_json_dir_name='local_json_files', worstofk=None, attack_style=None, attack_limits=None, fo_epsilon=None, fo_step_size=None, fo_num_steps=None, lambda_core=None, num_ids=None, group_size=None, use_core=None, seed=None, save_in_local_json=True, this_repo=None): # reset default graph (needed for running locally with run_jobs_ray.py) tf.reset_default_graph() # get configs config_dict = utilities.get_config(config) config_dict_copy = copy.deepcopy(config_dict) config = utilities.config_to_namedtuple(config_dict) # seeding randomness if seed == None: seed = config.training.tf_random_seed else: config_dict_copy['training']['tf_random_seed'] = seed tf.set_random_seed(seed) np.random.seed(seed) # Setting up training parameters max_num_training_steps = config.training.max_num_training_steps step_size_schedule = config.training.step_size_schedule weight_decay = config.training.weight_decay momentum = config.training.momentum if group_size == None: group_size = config.training.group_size else: config_dict_copy['training']['group_size'] = int(group_size) if num_ids == None: num_ids = config.training.num_ids else: config_dict_copy['training']['num_ids'] = int(num_ids) if lambda_core == None: lambda_core = config.training.lambda_ else: config_dict_copy['training']['lambda_'] = float(lambda_core) if use_core == None: use_core = config.training.use_core else: config_dict_copy['training']['use_core'] = use_core batch_size = config.training.batch_size # number of groups with group size > 1 num_grouped_ids = batch_size - num_ids # number of unique ids needs to be larger than half the desired batch size # so that full batch can be filled up assert num_ids >= batch_size / group_size # currently, code is designed for groups of size 2 # assert batch_size % group_size == 0 adversarial_training = config.training.adversarial_training eval_during_training = config.training.eval_during_training if eval_during_training: num_eval_steps = config.training.num_eval_steps # Setting up output parameters num_output_steps = config.training.num_output_steps num_summary_steps = config.training.num_summary_steps num_checkpoint_steps = config.training.num_checkpoint_steps num_easyeval_steps = config.training.num_easyeval_steps # Setting up the data and the model data_path = config.data.data_path if config.data.dataset_name == "cifar-10": raw_iterator = cifar10_input.CIFAR10Data(data_path) elif config.data.dataset_name == "cifar-100": raw_iterator = cifar100_input.CIFAR100Data(data_path) elif config.data.dataset_name == "svhn": raw_iterator = svhn_input.SVHNData(data_path) else: raise ValueError("Unknown dataset name.") global_step = tf.train.get_or_create_global_step() model_family = config.model.model_family if model_family == "resnet": if config.attack.use_spatial and config.attack.spatial_method == 'fo': diffable = True else: diffable = False model = resnet.Model(config.model, num_ids, diffable) elif model_family == "vgg": if config.attack.use_spatial and config.attack.spatial_method == 'fo': # TODO: add differentiable transformer to vgg.py raise NotImplementedError model = vgg.Model(config.model, num_ids) # uncomment to get a list of trainable variables # model_vars = tf.trainable_variables() # Setting up the optimizer boundaries = [int(sss[0]) for sss in step_size_schedule] boundaries = boundaries[1:] values = [sss[1] for sss in step_size_schedule] learning_rate = 
tf.train.piecewise_constant(tf.cast(global_step, tf.int32), boundaries, values) if use_core and lambda_core > 0: total_loss = (model.mean_xent + weight_decay * model.weight_decay_loss + lambda_core * model.core_loss) else: total_loss = model.mean_xent + weight_decay * model.weight_decay_loss optimizer = tf.train.MomentumOptimizer(learning_rate, momentum) train_step = optimizer.minimize(total_loss, global_step=global_step) # Set up adversary if worstofk == None: worstofk = config.attack.random_tries else: config_dict_copy['attack']['random_tries'] = worstofk if fo_epsilon == None: fo_epsilon = config.attack.epsilon else: config_dict_copy['attack']['epsilon'] = fo_epsilon if fo_step_size == None: fo_step_size = config.attack.step_size else: config_dict_copy['attack']['step_size'] = fo_step_size if fo_num_steps == None: fo_num_steps = config.attack.num_steps else: config_dict_copy['attack']['num_steps'] = fo_num_steps # @ Luzius: incorporate being able to choose multiple transformations if attack_style == None: attack_style = 'rotate' # Training attack # L-inf attack if use_spatial is False and use_linf is True # spatial attack if use_spatial is True and use_linf is False # spatial random attack if spatial_method is 'random' # spatial PGD attack if spatial_method is 'fo' attack = SpatialAttack(model, config.attack, config.attack.spatial_method, worstofk, attack_limits, fo_epsilon, fo_step_size, fo_num_steps) # Different eval attacks # Random attack # L-inf attack if use_spatial is False and use_linf is True # random (worst-of-1) spatial attack if use_spatial is True # and use_linf is False attack_eval_random = SpatialAttack(model, config.attack, 'random', 1, attack_limits, fo_epsilon, fo_step_size, fo_num_steps) # First order attack # L-inf attack if use_spatial is False and use_linf is True # first-order spatial attack if use_spatial is True and use_linf is False attack_eval_fo = SpatialAttack(model, config.attack, 'fo', 1, attack_limits, fo_epsilon, fo_step_size, fo_num_steps) # Grid attack # spatial attack if use_spatial is True and use_linf is False # not executed for L-inf attacks attack_eval_grid = SpatialAttack(model, config.attack, 'grid', None, attack_limits) # TODO(christina): add L-inf attack with random restarts # ------------------START EXPERIMENT ------------------------- # Initialize the Repo print("==> Creating repo..") # Create repo object if it wasn't passed, comment out if repo has issues if this_repo == None: this_repo = exprepo.ExperimentRepo( save_in_local_json=save_in_local_json, json_filename=experiment_json_fname, local_dir_name=local_json_dir_name, root_dir=save_root_path) # Create new experiment if this_repo != None: exp_id = this_repo.create_new_experiment(config.data.dataset_name, model_family, worstofk, attack_style, attack_limits, lambda_core, num_grouped_ids, group_size, config_dict_copy) # Setting up the Tensorboard and checkpoint outputs model_dir = '%s/logdir/%s' % (save_root_path, exp_id) # We add accuracy and xent twice so we can easily make three types of # comparisons in Tensorboard: # - train vs eval (for a single run) # - train of different runs # - eval of different runs saver = tf.train.Saver(max_to_keep=3) tf.summary.scalar('accuracy_nat_train', model.accuracy, collections=['nat']) tf.summary.scalar('accuracy_nat', model.accuracy, collections=['nat']) tf.summary.scalar('xent_nat_train', model.xent / batch_size, collections=['nat']) tf.summary.scalar('xent_nat', model.xent / batch_size, collections=['nat']) tf.summary.image('images_nat_train', 
model.x_image, collections=['nat']) tf.summary.scalar('learning_rate', learning_rate, collections=['nat']) nat_summaries = tf.summary.merge_all('nat') # data augmentation used if config.training.data_augmentation_core is True x_input_placeholder = tf.placeholder(tf.float32, shape=[None, 32, 32, 3]) flipped = tf.map_fn(lambda img: tf.image.random_flip_left_right(img), x_input_placeholder) with tf.Session() as sess: # initialize standard data augmentation if config.training.data_augmentation: if config.data.dataset_name == "cifar-10": data_iterator = cifar10_input.AugmentedCIFAR10Data( raw_iterator, sess) elif config.data.dataset_name == "cifar-100": data_iterator = cifar100_input.AugmentedCIFAR100Data( raw_iterator, sess) elif config.data.dataset_name == "svhn": data_iterator = svhn_input.AugmentedSVHNData( raw_iterator, sess) else: raise ValueError("Unknown dataset name.") else: data_iterator = raw_iterator eval_dict = { model.x_input: data_iterator.eval_data.xs, model.y_input: data_iterator.eval_data.ys, model.group: np.arange(0, batch_size, 1, dtype="int32"), model.transform: np.zeros([data_iterator.eval_data.n, 3]), model.is_training: False } # Initialize the summary writer, global variables, and our time counter. summary_writer = tf.summary.FileWriter(model_dir, sess.graph) # if eval_during_training: eval_dir = os.path.join(model_dir, 'eval') os.makedirs(eval_dir, exist_ok=True) eval_summary_writer = tf.summary.FileWriter(eval_dir) sess.run(tf.global_variables_initializer()) training_time = 0.0 run_time_without_eval = 0.0 run_time_adv_ex_creation = 0.0 run_time_train_step = 0.0 #################################### # Main training loop #################################### start_time = time.time() no_epochs_done = 0 # the same as epoch_count, need to merge start_epoch = timer() it_count = 0 epoch_count = 0 acc_sum = 0 for ii in range(max_num_training_steps + 1): # original batch x_batch, y_batch, epoch_done = data_iterator.train_data.get_next_batch( num_ids, multiple_passes=True) no_epochs_done += epoch_done # noop trans noop_trans = np.zeros([len(x_batch), 3]) # id_batch starts with IDs of original examples id_batch = np.arange(0, num_ids, 1, dtype="int32") if use_core: # first num_id examples of batch are natural x_batch_inp = x_batch y_batch_inp = y_batch trans_inp = noop_trans id_batch_inp = id_batch start = timer() for _ in range(group_size - 1): if config.training.data_augmentation_core: raise NotImplementedError # create rotated examples x_batch_adv_i, trans_adv_i = attack.perturb( x_batch, y_batch, sess) # construct new batches including rotated examples x_batch_inp = np.concatenate((x_batch_inp, x_batch_adv_i), axis=0) y_batch_inp = np.concatenate((y_batch_inp, y_batch), axis=0) trans_inp = np.concatenate((trans_inp, trans_adv_i), axis=0) id_batch_inp = np.concatenate((id_batch_inp, id_batch), axis=0) end = timer() training_time += end - start run_time_without_eval += end - start run_time_adv_ex_creation += end - start trans_adv = trans_inp[num_ids:, ...] id_batch_adv = id_batch_inp[num_ids:] y_batch_adv = y_batch_inp[num_ids:] x_batch_adv = x_batch_inp[num_ids:, ...] 
else: if adversarial_training: start = timer() x_batch_inp, trans_inp = attack.perturb( x_batch, y_batch, sess) end = timer() training_time += end - start run_time_without_eval += end - start run_time_adv_ex_creation += end - start else: x_batch_inp, trans_inp = x_batch, noop_trans # for adversarial training and plain training, the following # variables coincide y_batch_inp = y_batch y_batch_adv = y_batch trans_adv = trans_inp x_batch_adv = x_batch_inp id_batch_inp = id_batch id_batch_adv = id_batch # feed_dict for training step inp_dict = { model.x_input: x_batch_inp, model.y_input: y_batch_inp, model.group: id_batch_inp, model.transform: trans_inp, model.is_training: False } # separate natural and adversarially transformed examples for eval nat_dict = { model.x_input: x_batch, model.y_input: y_batch, model.group: id_batch, model.transform: noop_trans, model.is_training: False } adv_dict = { model.x_input: x_batch_adv, model.y_input: y_batch_adv, model.group: id_batch_adv, model.transform: trans_adv, model.is_training: False } ########### Outputting/saving weights and evaluations ########### acc_grid_te = -1.0 avg_xent_grid_te = -1.0 acc_fo_te = -1.0 avg_xent_fo_te = -1.0 saved_weights = 0 # Compute training accuracy on this minibatch nat_acc_tr = 100 * sess.run(model.accuracy, feed_dict=nat_dict) # Output to stdout if epoch_done: epoch_time = timer() - start_epoch # Average av_acc = acc_sum / it_count # ToDo: Log this to file as well # Training accuracy over epoch print('Epoch {}: ({})'.format(epoch_count, datetime.now())) print(' training natural accuracy {:.4}%'.format(av_acc)) print(' {:.4} seconds per epoch'.format(epoch_time)) # Accuracy on entire test set nat_acc_te = 100 * sess.run(model.accuracy, feed_dict=eval_dict) print( ' test set natural accuracy {:.4}%'.format(nat_acc_te)) # Set loss sum, it count back to zero acc_sum = nat_acc_tr epoch_done = 0 epoch_count += 1 start_epoch = timer() it_count = 1 else: it_count += 1 acc_sum += nat_acc_tr # Output to stdout if ii % num_output_steps == 0: # nat_acc_tr = 100 * sess.run(model.accuracy, feed_dict=nat_dict) adv_acc_tr = 100 * sess.run(model.accuracy, feed_dict=adv_dict) inp_acc_tr = 100 * sess.run(model.accuracy, feed_dict=inp_dict) # print('Step {}: ({})'.format(ii, datetime.now())) # print(' training nat accuracy {:.4}%'.format(nat_acc_tr)) # print(' training adv accuracy {:.4}%'.format(adv_acc_tr)) # print(' training inp accuracy {:.4}%'.format(inp_acc_tr)) if ii != 0: # print(' {} examples per second'.format( # num_output_steps * batch_size / training_time)) training_time = 0.0 # Tensorboard summaries and heavy checkpoints if ii % num_summary_steps == 0: summary = sess.run(nat_summaries, feed_dict=nat_dict) summary_writer.add_summary(summary, global_step.eval(sess)) # Write a checkpoint and eval if it's time if ii % num_checkpoint_steps == 0 or ii == max_num_training_steps: # Save checkpoint data (weights) saver.save(sess, os.path.join(model_dir, 'checkpoint'), global_step=global_step) saved_weights = 1 # Write evaluation meta data for checkpoint if ii % num_easyeval_steps == 0 or ii == max_num_training_steps: # Get training accuracies nat_acc_tr = 100 * sess.run(model.accuracy, feed_dict=nat_dict) adv_acc_tr = 100 * sess.run(model.accuracy, feed_dict=adv_dict) inp_acc_tr = 100 * sess.run(model.accuracy, feed_dict=inp_dict) # Evaluation on random and natural [ acc_nat_te, acc_rand_adv_te, avg_xent_nat_te, avg_xent_adv_te ] = evaluate(model, attack_eval_random, sess, config, 'random', data_path, None) # Evaluation on grid 
(only for spatial attacks) if ((eval_during_training and ii % num_eval_steps == 0 and ii > 0 and config.attack.use_spatial) or (eval_during_training and ii == max_num_training_steps and config.attack.use_spatial)): if config.attack.use_spatial and config.attack.spatial_method == 'fo': # Evaluation on first-order PDG attack (too expensive to # evaluate more frequently on whole dataset) [_, acc_fo_te, _, avg_xent_fo_te] = evaluate(model, attack_eval_fo, sess, config, 'fo', data_path, None) # Evaluation on grid [_, acc_grid_te, _, avg_xent_grid_te ] = evaluate(model, attack_eval_grid, sess, config, "grid", data_path, eval_summary_writer) chkpt_id = this_repo.create_training_checkpoint( exp_id, training_step=ii, epoch=no_epochs_done, train_acc_nat=nat_acc_tr, train_acc_adv=adv_acc_tr, train_acc_inp=inp_acc_tr, test_acc_nat=acc_nat_te, test_acc_adv=acc_rand_adv_te, test_acc_fo=acc_fo_te, test_acc_grid=acc_grid_te, test_loss_nat=avg_xent_nat_te, test_loss_adv=avg_xent_adv_te, test_loss_fo=avg_xent_fo_te, test_loss_grid=avg_xent_grid_te) if saved_weights == 0: # Save checkpoint data (weights) saver.save( sess, os.path.join(model_dir, '{}_checkpoint'.format(chkpt_id))) # Actual training step start = timer() inp_dict[model.is_training] = True sess.run(train_step, feed_dict=inp_dict) end = timer() training_time += end - start run_time_without_eval += end - start run_time_train_step += end - start runtime = time.time() - start_time this_repo.mark_experiment_as_completed( exp_id, train_acc_nat=nat_acc_tr, train_acc_adv=adv_acc_tr, train_acc_inp=inp_acc_tr, test_acc_nat=acc_nat_te, test_acc_adv=acc_rand_adv_te, test_acc_fo=acc_fo_te, test_acc_grid=acc_grid_te, runtime=runtime, runtime_wo_eval=run_time_without_eval, runtime_train_step=run_time_train_step, runtime_adv_ex_creation=run_time_adv_ex_creation) return 0
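# The loop above builds its training batch by stacking the natural examples
# with (group_size - 1) spatially transformed copies that share the same group
# id, so the model's core (consistency) loss can compare predictions within a
# group. A self-contained NumPy sketch of that batch construction; the
# perturb_fn stand-in replaces SpatialAttack.perturb, which is defined
# elsewhere in the repo, and the transform columns are placeholders:
import numpy as np

def build_grouped_batch(x_batch, y_batch, group_size, perturb_fn):
    num_ids = len(x_batch)
    id_batch = np.arange(num_ids, dtype='int32')
    noop_trans = np.zeros([num_ids, 3])            # three transform params, no-op
    x_inp, y_inp = x_batch, y_batch
    trans_inp, id_inp = noop_trans, id_batch
    for _ in range(group_size - 1):
        x_adv, trans_adv = perturb_fn(x_batch, y_batch)
        x_inp = np.concatenate([x_inp, x_adv], axis=0)
        y_inp = np.concatenate([y_inp, y_batch], axis=0)
        trans_inp = np.concatenate([trans_inp, trans_adv], axis=0)
        id_inp = np.concatenate([id_inp, id_batch], axis=0)
    return x_inp, y_inp, trans_inp, id_inp

# illustrative stand-in: random transform parameters, images left untouched
fake_perturb = lambda x, y: (x, np.random.uniform(-3, 3, size=[len(x), 3]))
x_inp, y_inp, t_inp, g_inp = build_grouped_batch(
    np.zeros([8, 32, 32, 3]), np.zeros(8, dtype='int64'), 2, fake_perturb)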
def train(config): # seeding randomness tf.set_random_seed(config.training.tf_random_seed) np.random.seed(config.training.np_random_seed) # Setting up training parameters max_num_training_steps = config.training.max_num_training_steps step_size_schedule = config.training.step_size_schedule weight_decay = config.training.weight_decay momentum = config.training.momentum batch_size = config.training.batch_size group_size = config.training.group_size adversarial_training = config.training.adversarial_training eval_during_training = config.training.eval_during_training if eval_during_training: num_eval_steps = config.training.num_eval_steps # Setting up output parameters num_output_steps = config.training.num_output_steps num_summary_steps = config.training.num_summary_steps num_checkpoint_steps = config.training.num_checkpoint_steps #adapting batch size batch_size_group = batch_size * config.training.group_size # Setting up the data and the model data_path = config.data.data_path raw_cifar = cifar10_input.CIFAR10Data(data_path) global_step = tf.train.get_or_create_global_step() model = resnet.Model(config.model) # uncomment to get a list of trainable variables # model_vars = tf.trainable_variables() # slim.model_analyzer.analyze_vars(model_vars, print_info=True) # Setting up the optimizer boundaries = [int(sss[0]) for sss in step_size_schedule] boundaries = boundaries[1:] values = [sss[1] for sss in step_size_schedule] learning_rate = tf.train.piecewise_constant(tf.cast(global_step, tf.int32), boundaries, values) total_loss = model.mean_xent + weight_decay * model.weight_decay_loss + model.core_loss optimizer = tf.train.MomentumOptimizer(learning_rate, momentum) train_step = optimizer.minimize(total_loss, global_step=global_step) # Set up adversary attack = SpatialAttack(model, config.attack) attack_eval_random = SpatialAttack(model, config.eval_attack_random) attack_eval_grid = SpatialAttack(model, config.eval_attack_grid) # Setting up the Tensorboard and checkpoint outputs model_dir = config.model.output_dir if eval_during_training: eval_dir = os.path.join(model_dir, 'eval') if not os.path.exists(eval_dir): os.makedirs(eval_dir) # We add accuracy and xent twice so we can easily make three types of # comparisons in Tensorboard: # - train vs eval (for a single run) # - train of different runs # - eval of different runs saver = tf.train.Saver(max_to_keep=3) tf.summary.scalar('accuracy_adv_train', model.accuracy, collections=['adv']) tf.summary.scalar('accuracy_adv', model.accuracy, collections=['adv']) tf.summary.scalar('xent_adv_train', model.xent / batch_size_group, collections=['adv']) tf.summary.scalar('xent_adv', model.xent / batch_size_group, collections=['adv']) tf.summary.image('images_adv_train', model.x_image, collections=['adv']) adv_summaries = tf.summary.merge_all('adv') tf.summary.scalar('accuracy_nat_train', model.accuracy, collections=['nat']) tf.summary.scalar('accuracy_nat', model.accuracy, collections=['nat']) tf.summary.scalar('xent_nat_train', model.xent / batch_size_group, collections=['nat']) tf.summary.scalar('xent_nat', model.xent / batch_size_group, collections=['nat']) tf.summary.image('images_nat_train', model.x_image, collections=['nat']) tf.summary.scalar('learning_rate', learning_rate, collections=['nat']) nat_summaries = tf.summary.merge_all('nat') #dataAugmentation x_input_placeholder = tf.placeholder(tf.float32, shape=[None, 32, 32, 3]) flipped = tf.map_fn(lambda img: tf.image.random_flip_left_right(img), x_input_placeholder) with tf.Session() as sess: # 
initialize data augmentation if config.training.data_augmentation: cifar = cifar10_input.AugmentedCIFAR10Data(raw_cifar, sess) else: cifar = raw_cifar # Initialize the summary writer, global variables, and our time counter. summary_writer = tf.summary.FileWriter(model_dir, sess.graph) if eval_during_training: eval_summary_writer = tf.summary.FileWriter(eval_dir) sess.run(tf.global_variables_initializer()) training_time = 0.0 training_time_total = 0.0 adv_time = 0.0 eval_time = 0.0 core_time = 0.0 # Main training loop for ii in range(max_num_training_steps + 1): x_batch, y_batch = cifar.train_data.get_next_batch( batch_size, multiple_passes=True) noop_trans = np.zeros([len(x_batch), 3]) # Compute Adversarial Perturbations if adversarial_training: start = timer() x_batch_adv, adv_trans = attack.perturb(x_batch, y_batch, sess) end = timer() adv_time += end - start else: x_batch_adv, adv_trans = x_batch, noop_trans #Create rotatated examples start = timer() x_batch_nat = x_batch y_batch_nat = y_batch id_batch = np.arange(0, batch_size, 1, dtype="int32") ids = np.arange(0, batch_size, 1, dtype="int32") for i in range(config.training.group_size): if config.training.data_augmentation_core: x_batch_core = sess.run( flipped, feed_dict={x_input_placeholder: x_batch}) else: x_batch_core = x_batch x_batch_group, trans_group = attack.perturb( x_batch_core, y_batch, sess) #construct new batches including rotateted examples x_batch_adv = np.concatenate((x_batch_adv, x_batch_group), axis=0) x_batch_nat = np.concatenate((x_batch_nat, x_batch_group), axis=0) y_batch_nat = np.concatenate((y_batch_nat, y_batch), axis=0) adv_trans = np.concatenate((adv_trans, trans_group), axis=0) noop_trans = np.concatenate((noop_trans, trans_group), axis=0) id_batch = np.concatenate((id_batch, ids), axis=0) end = timer() core_time += end - start nat_dict = { model.x_input: x_batch_nat, model.y_input: y_batch_nat, model.group: id_batch, model.transform: noop_trans, model.is_training: False } adv_dict = { model.x_input: x_batch_adv, model.y_input: y_batch_nat, model.group: id_batch, model.transform: adv_trans, model.is_training: False } # Output to stdout if ii % num_output_steps == 0: nat_acc = sess.run(model.accuracy, feed_dict=nat_dict) adv_acc = sess.run(model.accuracy, feed_dict=adv_dict) print('Step {}: ({})'.format(ii, datetime.now())) print(' training nat accuracy {:.4}%'.format(nat_acc * 100)) print(' training adv accuracy {:.4}%'.format(adv_acc * 100)) if ii != 0: print(' {} examples per second'.format( num_output_steps * batch_size_group / training_time)) training_time = 0.0 # Tensorboard summaries if ii % num_summary_steps == 0: summary = sess.run(adv_summaries, feed_dict=adv_dict) summary_writer.add_summary(summary, global_step.eval(sess)) summary = sess.run(nat_summaries, feed_dict=nat_dict) summary_writer.add_summary(summary, global_step.eval(sess)) # Write a checkpoint if ii % num_checkpoint_steps == 0: saver.save(sess, os.path.join(model_dir, 'checkpoint'), global_step=global_step) if eval_during_training and ii % num_eval_steps == 0: start = timer() evaluate(model, attack_eval_random, sess, config, "random", eval_summary_writer) evaluate(model, attack_eval_grid, sess, config, "grid", eval_summary_writer) end = timer() eval_time += end - start print(' {}seconds total training time'.format( training_time_total)) print(' {}seconds total adv. 
example time'.format(adv_time)) print( ' {}seconds total core example time'.format(core_time)) print(' {}seconds total evalutation time'.format(eval_time)) # Actual training step start = timer() if adversarial_training: adv_dict[model.is_training] = True sess.run(train_step, feed_dict=adv_dict) else: nat_dict[model.is_training] = True sess.run(train_step, feed_dict=nat_dict) end = timer() training_time += end - start training_time_total += end - start
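# SpatialAttack (defined elsewhere in the repo) is used above in its random,
# worst-of-k mode: sample k random spatial transforms per example, evaluate
# the loss under each, and keep the transform that maximises it. A hedged
# NumPy sketch of just that selection logic; the parameter ordering, ranges,
# and the placeholder loss are illustrative assumptions:
import numpy as np

def worst_of_k_transforms(x_batch, y_batch, loss_fn, k=10,
                          max_trans=3.0, max_rot=30.0):
    n = len(x_batch)
    best_trans = np.zeros([n, 3])
    best_loss = np.full(n, -np.inf)
    for _ in range(k):
        trans = np.stack([
            np.random.uniform(-max_trans, max_trans, n),  # x translation
            np.random.uniform(-max_trans, max_trans, n),  # y translation
            np.random.uniform(-max_rot, max_rot, n),      # rotation (degrees)
        ], axis=1)
        loss = loss_fn(x_batch, y_batch, trans)           # per-example loss
        better = loss > best_loss
        best_trans[better] = trans[better]
        best_loss = np.maximum(best_loss, loss)
    return best_trans

# illustrative placeholder loss: prefers large rotations
toy_loss = lambda x, y, t: np.abs(t[:, 2])
trans = worst_of_k_transforms(np.zeros([4, 32, 32, 3]), np.zeros(4), toy_loss)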
def train(tf_seed, np_seed, train_steps, finetune_train_steps, out_steps, summary_steps, checkpoint_steps, step_size_schedule, weight_decay, momentum, train_batch_size, epsilon, replay_m, model_dir, source_model_dir, dataset, beta, gamma, disc_update_steps, adv_update_steps_per_iter, disc_layers, disc_base_channels, steps_before_adv_opt, adv_encoder_type, enc_output_activation, sep_opt_version, grad_image_ratio, final_grad_image_ratio, num_grad_image_ratios, normalize_zero_mean, eval_adv_attack, same_optimizer, only_fully_connected, finetuned_source_model_dir, train_finetune_source_model, finetune_img_random_pert, img_random_pert, only_finetune, finetune_whole_model, model_suffix, **kwargs): tf.set_random_seed(tf_seed) np.random.seed(np_seed) model_dir = model_dir + 'IGAM-%s_b%d_beta_%.3f_gamma_%.3f_disc_update_steps%d_l%dbc%d' % ( dataset, train_batch_size, beta, gamma, disc_update_steps, disc_layers, disc_base_channels) # TODO Replace with not defaults if img_random_pert: model_dir = model_dir + '_imgpert' if steps_before_adv_opt != 0: model_dir = model_dir + '_advdelay%d' % (steps_before_adv_opt) if train_steps != 80000: model_dir = model_dir + '_%dsteps' % (train_steps) if same_optimizer == False: model_dir = model_dir + '_adamDopt' if tf_seed != 451760341: model_dir = model_dir + '_tf_seed%d' % (tf_seed) if np_seed != 216105420: model_dir = model_dir + '_np_seed%d' % (np_seed) model_dir = model_dir + model_suffix # Setting up the data and the model data_path = get_path_dir(dataset=dataset, **kwargs) if dataset == 'cifar10': raw_data = cifar10_input.CIFAR10Data(data_path) else: raw_data = cifar100_input.CIFAR100Data(data_path) global_step = tf.train.get_or_create_global_step() increment_global_step_op = tf.assign(global_step, global_step + 1) reset_global_step_op = tf.assign(global_step, 0) source_model = ModelExtendedLogits(mode='train', target_task_class_num=100, train_batch_size=train_batch_size) model = Model(mode='train', dataset=dataset, train_batch_size=train_batch_size, normalize_zero_mean=normalize_zero_mean) # Setting up the optimizers boundaries = [int(sss[0]) for sss in step_size_schedule][1:] values = [sss[1] for sss in step_size_schedule] learning_rate = tf.train.piecewise_constant(tf.cast(global_step, tf.int32), boundaries, values) c_optimizer = tf.train.MomentumOptimizer(learning_rate, momentum) finetune_optimizer = tf.train.AdamOptimizer(learning_rate=0.001) if same_optimizer: d_optimizer = tf.train.MomentumOptimizer(learning_rate, momentum) else: print("Using ADAM opt for DISC model") d_optimizer = tf.train.AdamOptimizer(learning_rate=0.001) # Compute input gradient (saliency map) input_grad = tf.gradients(model.target_softmax, model.x_input, name="gradients_ig")[0] source_model_input_grad = tf.gradients(source_model.target_softmax, source_model.x_input, name="gradients_ig_source_model")[0] # lp norm diff between input_grad & source_model_input_grad input_grad_l2_norm_diff = tf.reduce_mean( tf.reduce_sum(tf.pow(tf.subtract(input_grad, source_model_input_grad), 2.0), keepdims=True)) # Setting up the discriminator model labels_input_grad = tf.zeros(tf.shape(input_grad)[0], dtype=tf.int64) labels_source_model_input_grad = tf.ones(tf.shape(input_grad)[0], dtype=tf.int64) disc_model = IgamConvDiscriminatorModel( mode='train', dataset=dataset, train_batch_size=train_batch_size, num_conv_layers=disc_layers, base_num_channels=disc_base_channels, normalize_zero_mean=normalize_zero_mean, x_modelgrad_input_tensor=input_grad, y_modelgrad_input_tensor=labels_input_grad, 
x_source_modelgrad_input_tensor=source_model_input_grad, y_source_modelgrad_input_tensor=labels_source_model_input_grad, only_fully_connected=only_fully_connected) t_vars = tf.trainable_variables() C_vars = [var for var in t_vars if 'classifier' in var.name] D_vars = [var for var in t_vars if 'discriminator' in var.name] source_model_vars = [ var for var in t_vars if ('discriminator' not in var.name and 'classifier' not in var.name and 'target_task_logit' not in var.name) ] source_model_target_logit_vars = [ var for var in t_vars if 'target_task_logit' in var.name ] source_model_saver = tf.train.Saver(var_list=source_model_vars) finetuned_source_model_vars = source_model_vars + source_model_target_logit_vars finetuned_source_model_saver = tf.train.Saver( var_list=finetuned_source_model_vars) # Source model finetune optimization source_model_finetune_loss = source_model.target_task_mean_xent + weight_decay * source_model.weight_decay_loss total_loss = model.mean_xent + weight_decay * model.weight_decay_loss - beta * disc_model.mean_xent + gamma * input_grad_l2_norm_diff classification_c_loss = model.mean_xent + weight_decay * model.weight_decay_loss adv_c_loss = -beta * disc_model.mean_xent # Discriminator: Optimizating computation # discriminator loss total_d_loss = disc_model.mean_xent + weight_decay * disc_model.weight_decay_loss # Finetune source_model if finetune_whole_model: finetune_min_step = finetune_optimizer.minimize( source_model_finetune_loss, var_list=finetuned_source_model_vars) else: finetune_min_step = finetune_optimizer.minimize( source_model_finetune_loss, var_list=source_model_target_logit_vars) # Train classifier # classifier opt step final_grads = c_optimizer.compute_gradients(total_loss, var_list=C_vars) no_pert_grad = [(tf.zeros_like(v), v) if 'perturbation' in v.name else (g, v) for g, v in final_grads] c_min_step = c_optimizer.apply_gradients(no_pert_grad) # c_min_step = c_optimizer.minimize(total_loss, var_list=C_vars) classification_final_grads = c_optimizer.compute_gradients( classification_c_loss, var_list=C_vars) classification_no_pert_grad = [(tf.zeros_like(v), v) if 'perturbation' in v.name else (g, v) for g, v in classification_final_grads] c_classification_min_step = c_optimizer.apply_gradients( classification_no_pert_grad) # discriminator opt step d_min_step = d_optimizer.minimize(total_d_loss, var_list=D_vars) # Loss gradients to the model params logit_weights = tf.get_default_graph().get_tensor_by_name( 'classifier/logit/DW:0') last_conv_weights = tf.get_default_graph().get_tensor_by_name( 'classifier/unit_3_4/sub2/conv2/DW:0') first_conv_weights = tf.get_default_graph().get_tensor_by_name( 'classifier/input/init_conv/DW:0') model_xent_logit_grad_norm = tf.norm(tf.gradients(model.mean_xent, logit_weights)[0], ord='euclidean') disc_xent_logit_grad_norm = tf.norm(tf.gradients(disc_model.mean_xent, logit_weights)[0], ord='euclidean') input_grad_l2_norm_diff_logit_grad_norm = tf.norm(tf.gradients( input_grad_l2_norm_diff, logit_weights)[0], ord='euclidean') model_xent_last_conv_grad_norm = tf.norm(tf.gradients( model.mean_xent, last_conv_weights)[0], ord='euclidean') disc_xent_last_conv_grad_norm = tf.norm(tf.gradients( disc_model.mean_xent, last_conv_weights)[0], ord='euclidean') input_grad_l2_norm_diff_last_conv_grad_norm = tf.norm(tf.gradients( input_grad_l2_norm_diff, last_conv_weights)[0], ord='euclidean') model_xent_first_conv_grad_norm = tf.norm(tf.gradients( model.mean_xent, first_conv_weights)[0], ord='euclidean') disc_xent_first_conv_grad_norm = 
tf.norm(tf.gradients( disc_model.mean_xent, first_conv_weights)[0], ord='euclidean') input_grad_l2_norm_diff_first_conv_grad_norm = tf.norm(tf.gradients( input_grad_l2_norm_diff, first_conv_weights)[0], ord='euclidean') # Setting up the Tensorboard and checkpoint outputs if not os.path.exists(model_dir): os.makedirs(model_dir) saver = tf.train.Saver(max_to_keep=1) tf.summary.scalar('C accuracy', model.accuracy) tf.summary.scalar('D accuracy', disc_model.accuracy) tf.summary.scalar('C xent', model.xent / train_batch_size) tf.summary.scalar('D xent', disc_model.xent / train_batch_size) tf.summary.scalar('total C loss', total_loss / train_batch_size) tf.summary.scalar('total D loss', total_d_loss / train_batch_size) tf.summary.scalar('adv C loss', adv_c_loss / train_batch_size) tf.summary.scalar('C cls xent loss', model.mean_xent) tf.summary.scalar('D xent loss', disc_model.mean_xent) # Loss gradients tf.summary.scalar('model_xent_logit_grad_norm', model_xent_logit_grad_norm) tf.summary.scalar('disc_xent_logit_grad_norm', disc_xent_logit_grad_norm) tf.summary.scalar('input_grad_l2_norm_diff_logit_grad_norm', input_grad_l2_norm_diff_logit_grad_norm) tf.summary.scalar('model_xent_last_conv_grad_norm', model_xent_last_conv_grad_norm) tf.summary.scalar('disc_xent_last_conv_grad_norm', disc_xent_last_conv_grad_norm) tf.summary.scalar('input_grad_l2_norm_diff_last_conv_grad_norm', input_grad_l2_norm_diff_last_conv_grad_norm) tf.summary.scalar('model_xent_first_conv_grad_norm', model_xent_first_conv_grad_norm) tf.summary.scalar('disc_xent_first_conv_grad_norm', disc_xent_first_conv_grad_norm) tf.summary.scalar('input_grad_l2_norm_diff_first_conv_grad_norm', input_grad_l2_norm_diff_first_conv_grad_norm) merged_summaries = tf.summary.merge_all() with tf.Session() as sess: print( 'important params >>> \n model dir: %s \n dataset: %s \n training batch size: %d \n' % (model_dir, dataset, train_batch_size)) # initialize data augmentation if dataset == 'cifar10': data = cifar10_input.AugmentedCIFAR10Data(raw_data, sess, model) else: data = cifar100_input.AugmentedCIFAR100Data(raw_data, sess, model) # Initialize the summary writer, global variables, and our time counter. 
summary_writer = tf.summary.FileWriter(model_dir + '/train', sess.graph) eval_summary_writer = tf.summary.FileWriter(model_dir + '/eval') sess.run(tf.global_variables_initializer()) # Restore source model source_model_file = tf.train.latest_checkpoint(source_model_dir) source_model_saver.restore(sess, source_model_file) # Finetune source model here if train_finetune_source_model: time_before_finetuning = datetime.now() for ii in tqdm(range(finetune_train_steps)): x_batch, y_batch = data.train_data.get_next_batch( train_batch_size, multiple_passes=True) if finetune_img_random_pert: x_batch = x_batch + np.random.uniform( -epsilon, epsilon, x_batch.shape) x_batch = np.clip(x_batch, 0, 255) # ensure valid pixel range nat_dict = { source_model.x_input: x_batch, source_model.y_input: y_batch } # Output to stdout if ii % summary_steps == 0: train_finetune_acc, train_finetune_loss = sess.run( [ source_model.target_task_accuracy, source_model_finetune_loss ], feed_dict=nat_dict) x_eval_batch, y_eval_batch = data.eval_data.get_next_batch( train_batch_size, multiple_passes=True) if img_random_pert: x_eval_batch = x_eval_batch + np.random.uniform( -epsilon, epsilon, x_eval_batch.shape) x_eval_batch = np.clip(x_eval_batch, 0, 255) # ensure valid pixel range eval_dict = { source_model.x_input: x_eval_batch, source_model.y_input: y_eval_batch } val_finetune_acc, val_finetune_loss = sess.run( [ source_model.target_task_accuracy, source_model_finetune_loss ], feed_dict=eval_dict) print('Source Model Finetune Step {}: ({})'.format( ii, datetime.now())) print( ' training nat accuracy {:.4}% -- validation nat accuracy {:.4}%' .format(train_finetune_acc * 100, val_finetune_acc * 100)) print(' training nat c loss: {}'.format( train_finetune_loss)) print(' validation nat c loss: {}'.format( val_finetune_loss)) sys.stdout.flush() sess.run(finetune_min_step, feed_dict=nat_dict) sess.run(increment_global_step_op) time_after_finetuning = datetime.now() finetuning_time = time_after_finetuning - time_before_finetuning finetuning_time_file_path = os.path.join(model_dir, 'finetuning_time.txt') with open(finetuning_time_file_path, "w") as f: f.write("Total finetuning time: {}".format( str(finetuning_time))) print("Total finetuning time: {}".format(str(finetuning_time))) finetuned_source_model_saver.save(sess, os.path.join( finetuned_source_model_dir, 'checkpoint'), global_step=global_step) if only_finetune: return else: finetuned_source_model_file = tf.train.latest_checkpoint( finetuned_source_model_dir) finetuned_source_model_saver.restore(sess, finetuned_source_model_file) # reset global step to 0 before running main training loop sess.run(reset_global_step_op) time_before_training = datetime.now() # Main training loop for ii in tqdm(range(train_steps)): x_batch, y_batch = data.train_data.get_next_batch( train_batch_size, multiple_passes=True) if img_random_pert: x_batch = x_batch + np.random.uniform(-epsilon, epsilon, x_batch.shape) x_batch = np.clip(x_batch, 0, 255) # ensure valid pixel range labels_source_modelgrad_disc = np.ones_like(y_batch, dtype=np.int64) # Sample randinit input grads nat_dict = { model.x_input: x_batch, model.y_input: y_batch, source_model.x_input: x_batch, source_model.y_input: y_batch } # Output to stdout if ii % summary_steps == 0: train_acc, train_disc_acc, train_c_loss, train_d_loss, train_adv_c_loss, summary = sess.run( [ model.accuracy, disc_model.accuracy, total_loss, total_d_loss, adv_c_loss, merged_summaries ], feed_dict=nat_dict) summary_writer.add_summary(summary, 
global_step.eval(sess)) x_eval_batch, y_eval_batch = data.eval_data.get_next_batch( train_batch_size, multiple_passes=True) if img_random_pert: x_eval_batch = x_eval_batch + np.random.uniform( -epsilon, epsilon, x_eval_batch.shape) x_eval_batch = np.clip(x_eval_batch, 0, 255) # ensure valid pixel range labels_source_modelgrad_disc = np.ones_like(y_eval_batch, dtype=np.int64) eval_dict = { model.x_input: x_eval_batch, model.y_input: y_eval_batch, source_model.x_input: x_eval_batch, source_model.y_input: y_eval_batch } val_acc, val_disc_acc, val_c_loss, val_d_loss, val_adv_c_loss, summary = sess.run( [ model.accuracy, disc_model.accuracy, total_loss, total_d_loss, adv_c_loss, merged_summaries ], feed_dict=eval_dict) eval_summary_writer.add_summary(summary, global_step.eval(sess)) print('Step {}: ({})'.format(ii, datetime.now())) print( ' training nat accuracy {:.4}% -- validation nat accuracy {:.4}%' .format(train_acc * 100, val_acc * 100)) print( ' training nat disc accuracy {:.4}% -- validation nat disc accuracy {:.4}%' .format(train_disc_acc * 100, val_disc_acc * 100)) print( ' training nat c loss: {}, d loss: {}, adv c loss: {}' .format(train_c_loss, train_d_loss, train_adv_c_loss)) print( ' validation nat c loss: {}, d loss: {}, adv c loss: {}' .format(val_c_loss, val_d_loss, val_adv_c_loss)) sys.stdout.flush() # Tensorboard summaries elif ii % out_steps == 0: nat_acc, nat_disc_acc, nat_c_loss, nat_d_loss, nat_adv_c_loss = sess.run( [ model.accuracy, disc_model.accuracy, total_loss, total_d_loss, adv_c_loss ], feed_dict=nat_dict) print('Step {}: ({})'.format(ii, datetime.now())) print(' training nat accuracy {:.4}%'.format(nat_acc * 100)) print(' training nat disc accuracy {:.4}%'.format( nat_disc_acc * 100)) print( ' training nat c loss: {}, d loss: {}, adv c loss: {}' .format(nat_c_loss, nat_d_loss, nat_adv_c_loss)) # Write a checkpoint if (ii + 1) % checkpoint_steps == 0: saver.save(sess, os.path.join(model_dir, 'checkpoint'), global_step=global_step) # default mode if sep_opt_version == 1: if ii >= steps_before_adv_opt: # Actual training step for Classifier sess.run(c_min_step, feed_dict=nat_dict) sess.run(increment_global_step_op) if ii % disc_update_steps == 0: # Actual training step for Discriminator sess.run(d_min_step, feed_dict=nat_dict) else: # only train on classification loss sess.run(c_classification_min_step, feed_dict=nat_dict) sess.run(increment_global_step_op) elif sep_opt_version == 2: # Actual training step for Classifier if ii >= steps_before_adv_opt: if adv_update_steps_per_iter > 1: sess.run(c_classification_min_step, feed_dict=nat_dict) sess.run(increment_global_step_op) for i in range(adv_update_steps_per_iter): x_batch, y_batch = data.train_data.get_next_batch( train_batch_size, multiple_passes=True) if img_random_pert: x_batch = x_batch + np.random.uniform( -epsilon, epsilon, x_batch.shape) x_batch = np.clip( x_batch, 0, 255) # ensure valid pixel range nat_dict = { model.x_input: x_batch, model.y_input: y_batch, source_model.x_input: x_batch, source_model.y_input: y_batch } sess.run(c_adv_min_step, feed_dict=nat_dict) else: sess.run(c_min_step, feed_dict=nat_dict) sess.run(increment_global_step_op) if ii % disc_update_steps == 0: # Actual training step for Discriminator sess.run(d_min_step, feed_dict=nat_dict) else: # only train on classification loss sess.run(c_classification_min_step, feed_dict=nat_dict) sess.run(increment_global_step_op) elif sep_opt_version == 0: if ii >= steps_before_adv_opt: if ii % disc_update_steps == 0: sess.run([c_min_step, 
d_min_step], feed_dict=nat_dict) sess.run(increment_global_step_op) else: sess.run(c_min_step, feed_dict=nat_dict) sess.run(increment_global_step_op) else: sess.run(c_classification_min_step, feed_dict=nat_dict) sess.run(increment_global_step_op) time_after_training = datetime.now() training_time = time_after_training - time_before_training training_time_file_path = os.path.join(model_dir, 'training_time.txt') with open(training_time_file_path, "w") as f: f.write("Total Training time: {}".format(str(training_time))) print("Total Training time: {}".format(str(training_time))) # full test evaluation if dataset == 'cifar10': raw_data = cifar10_input.CIFAR10Data(data_path) else: raw_data = cifar100_input.CIFAR100Data(data_path) data_size = raw_data.eval_data.n if data_size % train_batch_size == 0: eval_steps = data_size // train_batch_size else: eval_steps = data_size // train_batch_size # eval_steps = data_size // train_batch_size + 1 total_num_correct = 0 for ii in tqdm(range(eval_steps)): x_eval_batch, y_eval_batch = raw_data.eval_data.get_next_batch( train_batch_size, multiple_passes=False) eval_dict = { model.x_input: x_eval_batch, model.y_input: y_eval_batch } num_correct = sess.run(model.num_correct, feed_dict=eval_dict) total_num_correct += num_correct eval_acc = total_num_correct / data_size clean_eval_file_path = os.path.join(model_dir, 'full_clean_eval_acc.txt') with open(clean_eval_file_path, "a+") as f: f.write("Full clean eval_acc: {}%".format(eval_acc * 100)) print("Full clean eval_acc: {}%".format(eval_acc * 100)) devices = sess.list_devices() for d in devices: print("sess' device names:") print(d.name) return model_dir
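# The IGAM objective above adds gamma * input_grad_l2_norm_diff, a penalty on
# the difference between the classifier's input gradient (saliency map) and
# the frozen source model's input gradient, alongside the discriminator term.
# A NumPy sketch of that matching penalty, assuming the two saliency maps have
# already been computed (e.g. with tf.gradients as in the code above); the
# random arrays are only illustrative:
import numpy as np

def input_grad_matching_penalty(grad_model, grad_source):
    # squared L2 difference between the two saliency maps, mirroring the
    # tf.reduce_sum(tf.pow(tf.subtract(...), 2.0)) term in the graph
    return np.sum((grad_model - grad_source) ** 2)

g_model = np.random.randn(4, 32, 32, 3)
g_source = np.random.randn(4, 32, 32, 3)
print(input_grad_matching_penalty(g_model, g_source))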
def train(config='configs/cifar10_config_stn.json', save_root_path='/cluster/work/math/fanyang-broglil/CoreRepo', worstofk=None, attack_style=None, attack_limits=None, lambda_core=None, num_grouped_ids=None, num_ids=None, group_size=None, use_core=None, seed=None, this_repo=None): config_dict = utilities.get_config(config) config_dict_copy = copy.deepcopy(config_dict) # model_dir = config_dict['model']['output_dir'] # if not os.path.exists(model_dir): # os.makedirs(model_dir) # # keep the configuration file with the model for reproducibility # with open(os.path.join(model_dir, 'config.json'), 'w') as f: # json.dump(config_dict, f, sort_keys=True, indent=4) config = utilities.config_to_namedtuple(config_dict) # seeding randomness if seed == None: seed = config.training.tf_random_seed else: config_dict_copy['training']['tf_random_seed'] = seed tf.set_random_seed(seed) np.random.seed(seed) # Setting up training parameters max_num_epochs = config.training.max_num_epochs step_size_schedule = config.training.step_size_schedule weight_decay = config.training.weight_decay momentum = config.training.momentum num_ids = config.training.num_ids # number of IDs per minibatch if group_size == None: group_size = config.training.group_size else: config_dict_copy['training']['group_size'] = group_size if num_grouped_ids == None: num_grouped_ids = config.training.num_grouped_ids else: config_dict_copy['training']['num_grouped_ids'] = num_grouped_ids if num_ids == None: num_ids = config.training.num_ids else: config_dict_copy['training']['num_ids'] = num_ids if lambda_core == None: lambda_core = config.training.lambda_ else: config_dict_copy['training']['lambda_'] = lambda_core if use_core == None: use_core = config.training.use_core else: config_dict_copy['training']['use_core'] = use_core adversarial_training = config.training.adversarial_training eval_during_training = config.training.eval_during_training if eval_during_training: num_eval_steps = config.training.num_eval_steps # Setting up output parameters num_summary_steps = config.training.num_summary_steps num_checkpoint_steps = config.training.num_checkpoint_steps num_easyeval_steps = config.training.num_easyeval_steps # mini batch size per iteration # ToDo: need to make this support variable number of num_grouped_ids batch_size = num_ids + num_grouped_ids # Setting up model and loss model_family = config.model.model_family with_transformer = config.model.transformer translation_model = config.model.translation_model if model_family == "resnet": model = loc_net.Model(config.model) else: print("Model family does not exist") exit() if use_core: total_loss = model.y_loss #model.mean_xent + weight_decay * model.weight_decay_loss + lambda_core * model.core_loss2 else: total_loss = model.y_loss #model.mean_xent + weight_decay * model.weight_decay_loss # Setting up the data and the model data_path = config.data.data_path if config.data.dataset_name == "cifar-10": raw_cifar = cifar10_input.CIFAR10Data(data_path) elif config.data.dataset_name == "cifar-100": raw_cifar = cifar100_input.CIFAR100Data(data_path) else: raise ValueError("Unknown dataset name.") # uncomment to get a list of trainable variables # model_vars = tf.trainable_variables() # slim.model_analyzer.analyze_vars(model_vars, print_info=True) # Setting up the optimizer boundaries = [int(sss[0]) for sss in step_size_schedule] boundaries = boundaries[1:] values = [sss[1] for sss in step_size_schedule] global_step = tf.train.get_or_create_global_step() learning_rate = 
tf.train.piecewise_constant(tf.cast(global_step, tf.int32), boundaries, values) optimizer = tf.train.MomentumOptimizer(learning_rate, momentum) #optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate, # name="Adam") train_step = optimizer.minimize(total_loss, global_step=global_step) # Set up adversary if worstofk == None: worstofk = config.attack.random_tries else: config_dict_copy['attack']['random_tries'] = worstofk # @ Luzius: incorporate being able to choose multiple transformations if attack_style == None: attack_style = 'rotate' # Training attack attack = SpatialAttack(model, config.attack, 'random', worstofk, attack_limits) # Different eval attacks # Same attack as worstofk # @ Luzius: currently the names are not clear/consistent since I wasn't sure if we actually want random or not since you originally had your attack like that but I feel like it should rather be worstofk? # attack_eval_adv = SpatialAttack(model, config.attack, 'random', worstofk, attack_limits) attack_eval_random = SpatialAttack(model, config.attack, 'random', 1, attack_limits) # Grid attack attack_eval_grid = SpatialAttack(model, config.attack, 'grid', None, attack_limits) # ------------------START EXPERIMENT ------------------------- # Initialize the Repo print("==> Creating repo..") # Create repo object if it wasn't passed, comment out if repo has issues if this_repo == None: this_repo = exprepo.ExperimentRepo(root_dir=save_root_path) # Create new experiment if this_repo != None: exp_id = this_repo.create_new_experiment('cifar-10', model_family, worstofk, attack_style, attack_limits, lambda_core, num_grouped_ids, group_size, config_dict_copy) # Setting up the Tensorboard and checkpoint outputs model_dir = '%s/logdir/%s' % (save_root_path, exp_id) os.makedirs(model_dir, exist_ok=True) # We add accuracy and xent twice so we can easily make three types of # comparisons in Tensorboard: # - train vs eval (for a single run) # - train of different runs # - eval of different runs saver = tf.train.Saver(max_to_keep=3) tf.summary.scalar('accuracy_nat_train', model.accuracy, collections=['nat']) tf.summary.scalar('accuracy_nat', model.accuracy, collections=['nat']) tf.summary.scalar('xent_nat_train', model.xent / batch_size, collections=['nat']) tf.summary.scalar('xent_nat', model.xent / batch_size, collections=['nat']) tf.summary.image('images_nat_train', model.x_image, collections=['nat']) tf.summary.scalar('learning_rate', learning_rate, collections=['nat']) nat_summaries = tf.summary.merge_all('nat') #dataAugmentation x_input_placeholder = tf.placeholder(tf.float32, shape=[None, 32, 32, 3]) flipped = tf.map_fn(lambda img: tf.image.random_flip_left_right(img), x_input_placeholder) tot_samp = raw_cifar.train_data.n max_num_iterations = int(np.floor((tot_samp / num_ids) * max_num_epochs)) print("Total # of samples is: %d; This exp. 
    # Compute the (epoch) gaps at which summaries, worst-of-1 evals, and
    # checkpoints should happen
    summary_gap = int(np.floor(max_num_epochs / num_summary_steps))
    easyeval_gap = int(np.floor(max_num_epochs / num_easyeval_steps))
    checkpoint_gap = int(np.floor(max_num_epochs / num_checkpoint_steps))

    with tf.Session() as sess:
        # initialize data augmentation
        if config.training.data_augmentation:
            if config.data.dataset_name == "cifar-10":
                cifar = cifar10_input.AugmentedCIFAR10Data(raw_cifar, sess)
            elif config.data.dataset_name == "cifar-100":
                cifar = cifar100_input.AugmentedCIFAR100Data(raw_cifar, sess)
            else:
                raise ValueError("Unknown dataset name.")
        else:
            cifar = raw_cifar

        cifar_eval_dict = {
            model.x_input: cifar.eval_data.xs,
            model.y_input: cifar.eval_data.ys,
            model.group: np.arange(0, batch_size, 1, dtype="int32"),
            model.transform: np.zeros([cifar.eval_data.n, 3]),
            model.is_training: False,
        }

        # Initialize the summary writer, global variables, and our time counter.
        summary_writer = tf.summary.FileWriter(model_dir, sess.graph)
        # if eval_during_training:
        eval_dir = os.path.join(model_dir, 'eval')
        os.makedirs(eval_dir, exist_ok=True)
        eval_summary_writer = tf.summary.FileWriter(eval_dir)

        sess.run(tf.global_variables_initializer())
        training_time = 0.0

        ####################################
        # Main training loop
        ####################################
        # Initialize cache variables
        start_time = time.time()
        start_epoch = timer()
        it_count = 0
        epoch_count = 0
        acc_sum = 0
        it_summary = 0
        it_easyeval = 0
        it_ckpt = 0
        adv_time = 0
        train_time = 0
        # Initialized here so the checkpoint branch can test whether an id
        # has already been created this epoch.
        chkpt_id = None

        for ii in range(max_num_iterations + 1):
            x_batch, y_batch, epoch_done = cifar.train_data.get_next_batch(
                num_ids, multiple_passes=True)

            noop_trans = np.zeros([len(x_batch), 3])
            x_batch_nat = x_batch
            y_batch_nat = y_batch
            id_batch = np.arange(0, num_ids, 1, dtype="int32")

            if use_core:
                # Create rotated examples
                start = timer()
                ids = np.arange(0, num_grouped_ids, 1, dtype="int32")

                for i in range(config.training.group_size):
                    if config.training.data_augmentation_core:
                        x_batch_core = sess.run(
                            flipped,
                            feed_dict={x_input_placeholder:
                                       x_batch[0:num_grouped_ids, :, :, :]})
                    else:
                        x_batch_core = x_batch[0:num_grouped_ids, :, :, :]

                    x_batch_group, trans_group = attack.perturb(
                        x_batch_core, y_batch[0:num_grouped_ids], sess)

                    # Construct new batches that include the rotated examples
                    x_batch_nat = np.concatenate((x_batch_nat, x_batch_group),
                                                 axis=0)
                    y_batch_nat = np.concatenate((y_batch_nat, y_batch),
                                                 axis=0)
                    noop_trans = np.concatenate((noop_trans, trans_group),
                                                axis=0)
                    id_batch = np.concatenate((id_batch, ids), axis=0)

                end = timer()
                training_time += end - start
                adv_time += end - start
            else:
                if adversarial_training:
                    start = timer()
                    x_batch_nat, noop_trans = attack.perturb(x_batch, y_batch,
                                                             sess)
                    end = timer()
                    adv_time += end - start

            nat_dict = {
                model.x_input: x_batch_nat,
                model.y_input: y_batch_nat,
                model.group: id_batch,
                model.transform: noop_trans,
                model.is_training: False,
            }

            ########## Outputting/saving weights and evaluations ###########
            nat_acc = -1.0
            acc_grid = -1.0
            avg_xent_grid = -1.0
            saved_weights = 0

            # Compute training accuracy on this minibatch
            train_nat_acc = sess.run(model.accuracy, feed_dict=nat_dict)

            # Output to stdout
            if epoch_done:
                epoch_time = timer() - start_epoch
                # Average training accuracy over the epoch
                av_acc = acc_sum / it_count

                # ToDo: log this to a file as well
                print('Epoch {}: ({})'.format(epoch_count, datetime.now()))
                print('    training natural accuracy {:.4}%'.format(
                    av_acc * 100))
                print('    {:.4} seconds per epoch'.format(epoch_time))

                # Accuracy on the entire test set
                test_nat_acc = sess.run(model.accuracy,
                                        feed_dict=cifar_eval_dict)
                print('    test set natural accuracy {:.4}%'.format(
                    test_nat_acc * 100))
                # print('    {:.4} seconds for test evaluation'.format(
                #     test_time))

                print("example TIME")
                print(adv_time)
                print("train TIME")
                print(train_time)

                ########### Things to do every xxx epochs #############
                # Check whether a summary should be written
                if (it_summary == summary_gap - 1
                        or epoch_count == max_num_epochs - 1):
                    summary = sess.run(nat_summaries, feed_dict=nat_dict)
                    summary_writer.add_summary(summary, global_step.eval(sess))
                    it_summary = 0
                else:
                    it_summary += 1

                # Check whether the worst-of-1 eval should be run
                if (it_easyeval == easyeval_gap - 1
                        or epoch_count == max_num_epochs - 1):
                    # Evaluation on adversarial and natural examples
                    [acc_nat, acc_adv, avg_xent_nat, avg_xent_adv] = evaluate(
                        model, attack_eval_random, sess, config, "random",
                        data_path, None)
                    # Save in a checkpoint record
                    chkpt_id = this_repo.create_training_checkpoint(
                        exp_id,
                        training_step=ii,
                        epoch=epoch_count,
                        train_acc_nat=nat_acc,
                        test_acc_adv=acc_adv,
                        test_acc_nat=acc_nat,
                        test_loss_adv=avg_xent_adv,
                        test_loss_nat=avg_xent_nat)
                    it_easyeval = 0
                else:
                    it_easyeval += 1

                startt = timer()
                if (it_ckpt == checkpoint_gap - 1
                        or epoch_count == max_num_epochs - 1):
                    # Create a checkpoint id if none exists yet
                    if not chkpt_id:
                        chkpt_id = this_repo.create_training_checkpoint(
                            exp_id,
                            training_step=ii,
                            epoch=epoch_count,
                            train_acc_nat=train_nat_acc,
                            test_acc_nat=test_nat_acc)
                    # Save checkpoint data (weights)
                    saver.save(sess,
                               os.path.join(model_dir,
                                            '{}_checkpoint'.format(chkpt_id)))
                    print('    chkpt saving took {:.4}s '.format(
                        timer() - startt))
                    it_ckpt = 0
                else:
                    it_ckpt += 1

                # Reset the accuracy sum and iteration count for the next epoch
                acc_sum = train_nat_acc
                epoch_done = 0
                epoch_count += 1
                start_epoch = timer()
                it_count = 1
            else:
                it_count += 1
                acc_sum += train_nat_acc

            # Actual training step
            start = timer()
            nat_dict[model.is_training] = True
            sess.run(train_step, feed_dict=nat_dict)
            training_time += timer() - start
            train_time += timer() - start

        runtime = time.time() - start_time

        # Do all evaluations in the last step - on the grid attack
        [_, acc_grid, _, avg_xent_grid] = evaluate(model, attack_eval_grid,
                                                   sess, config, "grid",
                                                   data_path,
                                                   eval_summary_writer)

        this_repo.mark_experiment_as_completed(
            exp_id,
            train_acc_nat=nat_acc,
            test_acc_adv=acc_adv,
            test_acc_nat=acc_nat,
            test_acc_grid=acc_grid,
            runtime=runtime)

    return 0
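
# Minimal usage sketch, not part of the original script: one way train() might
# be invoked from the command line. The argparse wrapper and flag names below
# are assumptions for illustration; only the train() keyword arguments
# (config, seed, worstofk) come from the function signature above.
if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser(
        description='Train a CIFAR model with spatial adversarial training.')
    parser.add_argument('--config', default='configs/cifar10_config_stn.json',
                        help='path to the JSON training configuration')
    parser.add_argument('--seed', type=int, default=None,
                        help='overrides training.tf_random_seed if given')
    parser.add_argument('--worstofk', type=int, default=None,
                        help='overrides attack.random_tries if given')
    args = parser.parse_args()

    train(config=args.config, seed=args.seed, worstofk=args.worstofk)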