def train(config='configs/cifar10_regression.json',
          save_root_path='/cluster/work/math/fanyang-broglil/CoreRepo',
          experiment_json_fname='experiments.json',
          local_json_dir_name='local_json_files',
          worstofk=None,
          attack_style=None,
          attack_limits=None,
          fo_epsilon=None,
          fo_step_size=None,
          fo_num_steps=None,
          lambda_core=None,
          num_ids=None,
          group_size=None,
          use_core=None,
          seed=None,
          save_in_local_json=True,
          this_repo=None):

    # reset default graph (needed for running locally with run_jobs_ray.py)
    tf.reset_default_graph()

    # get configs
    config_dict = utilities.get_config(config)
    config_dict_copy = copy.deepcopy(config_dict)
    config = utilities.config_to_namedtuple(config_dict)

    # seeding randomness
    if seed is None:
        seed = config.training.tf_random_seed
    else:
        config_dict_copy['training']['tf_random_seed'] = seed
    tf.set_random_seed(seed)
    np.random.seed(seed)

    # Setting up training parameters
    max_num_training_steps = config.training.max_num_training_steps
    step_size_schedule = config.training.step_size_schedule
    weight_decay = config.training.weight_decay
    momentum = config.training.momentum

    if group_size is None:
        group_size = config.training.group_size
    else:
        config_dict_copy['training']['group_size'] = int(group_size)
    if num_ids is None:
        num_ids = config.training.num_ids
    else:
        config_dict_copy['training']['num_ids'] = int(num_ids)
    if lambda_core is None:
        lambda_core = config.training.lambda_
    else:
        config_dict_copy['training']['lambda_'] = float(lambda_core)
    if use_core is None:
        use_core = config.training.use_core
    else:
        config_dict_copy['training']['use_core'] = use_core

    batch_size = config.training.batch_size
    # number of groups with group size > 1
    num_grouped_ids = batch_size - num_ids
    # the number of unique ids needs to be at least half the desired batch
    # size so that the full batch can be filled up
    assert num_ids >= batch_size / 2
    # currently, the code is designed for groups of size 2
    assert config.training.group_size == 2

    adversarial_training = config.training.adversarial_training
    eval_during_training = config.training.eval_during_training
    if eval_during_training:
        num_eval_steps = config.training.num_eval_steps

    # Setting up output parameters
    num_output_steps = config.training.num_output_steps
    num_summary_steps = config.training.num_summary_steps
    num_checkpoint_steps = config.training.num_checkpoint_steps
    num_easyeval_steps = config.training.num_easyeval_steps

    # Setting up the data and the model
    data_path = config.data.data_path
    if config.data.dataset_name == "cifar-10":
        raw_iterator = cifar10_input.CIFAR10Data(data_path)
    elif config.data.dataset_name == "cifar-100":
        raw_iterator = cifar100_input.CIFAR100Data(data_path)
    elif config.data.dataset_name == "svhn":
        raw_iterator = svhn_input.SVHNData(data_path)
    else:
        raise ValueError("Unknown dataset name.")

    global_step = tf.train.get_or_create_global_step()
    model_family = config.model.model_family
    if model_family == "resnet":
        if config.attack.use_spatial and config.attack.spatial_method == 'fo':
            diffable = True
        else:
            diffable = False
        model = resnet_reg.Model(config.model, num_ids, diffable,
                                 config.training.adversarial_ce)
    elif model_family == "vgg":
        if config.attack.use_spatial and config.attack.spatial_method == 'fo':
            diffable = True
        else:
            diffable = False
        if config.training.adversarial_ce:
            raise NotImplementedError
        model = vgg.Model(config.model, num_ids, diffable)

    # uncomment to get a list of trainable variables
    # model_vars = tf.trainable_variables()

    # Setting up the optimizer
    boundaries = [int(sss[0]) for sss in step_size_schedule]
    boundaries = boundaries[1:]
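    # Worked example (illustrative values, not from a shipped config): with
    #     step_size_schedule = [[0, 0.1], [40000, 0.01], [60000, 0.001]]
    # the first boundary is dropped, so boundaries = [40000, 60000] and
    # values = [0.1, 0.01, 0.001] below, i.e. a learning rate of 0.1 until
    # step 40000, then 0.01 until step 60000, and 0.001 afterwards.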
    values = [sss[1] for sss in step_size_schedule]
    learning_rate = tf.train.piecewise_constant(
        tf.cast(global_step, tf.int32), boundaries, values)

    if use_core and lambda_core > 0:
        print("WARNING: in the regression task, this branch should never "
              "be entered!\n")
        total_loss = (model.reg_loss +
                      weight_decay * model.weight_decay_loss +
                      lambda_core * model.core_loss2)
    else:
        total_loss = model.reg_loss + weight_decay * model.weight_decay_loss

    optimizer = tf.train.MomentumOptimizer(learning_rate, momentum)
    train_step = optimizer.minimize(total_loss, global_step=global_step)

    # Set up the adversary
    if worstofk is None:
        worstofk = config.attack.random_tries
    else:
        config_dict_copy['attack']['random_tries'] = worstofk
    if fo_epsilon is None:
        fo_epsilon = config.attack.epsilon
    else:
        config_dict_copy['attack']['epsilon'] = fo_epsilon
    if fo_step_size is None:
        fo_step_size = config.attack.step_size
    else:
        config_dict_copy['attack']['step_size'] = fo_step_size
    if fo_num_steps is None:
        fo_num_steps = config.attack.num_steps
    else:
        config_dict_copy['attack']['num_steps'] = fo_num_steps

    # @ Luzius: incorporate being able to choose multiple transformations
    if attack_style is None:
        attack_style = 'rotate'

    simple_train = config.attack.simple_train
    if not simple_train:
        # Training attack == defense
        # L-inf attack if use_spatial is False and use_linf is True
        # spatial attack if use_spatial is True and use_linf is False
        # spatial random attack if spatial_method is 'random'
        # spatial PGD attack if spatial_method is 'fo'
        attack = SpatialAttack(model, config.attack,
                               config.attack.spatial_method, worstofk,
                               attack_limits, fo_epsilon, fo_step_size,
                               fo_num_steps)

        # Different eval attacks
        # Random attack
        # L-inf attack if use_spatial is False and use_linf is True
        # random (worst-of-1) spatial attack if use_spatial is True
        # and use_linf is False
        attack_eval_random = SpatialAttack(model, config.attack, 'random', 1,
                                           attack_limits, fo_epsilon,
                                           fo_step_size, fo_num_steps)
        # First-order attack
        # L-inf attack if use_spatial is False and use_linf is True
        # first-order spatial attack if use_spatial is True and use_linf
        # is False
        attack_eval_fo = SpatialAttack(model, config.attack, 'fo', 1,
                                       attack_limits, fo_epsilon,
                                       fo_step_size, fo_num_steps)
        # Grid attack
        # spatial attack if use_spatial is True and use_linf is False
        # not executed for L-inf attacks
        attack_eval_grid = SpatialAttack(model, config.attack, 'grid', None,
                                         attack_limits)
    else:
        attack = SpatialAttack(model, config.attack,
                               config.attack.spatial_method, worstofk,
                               attack_limits, fo_epsilon, fo_step_size,
                               fo_num_steps)

    # TODO(christina): add L-inf attack with random restarts

    # ------------------ START EXPERIMENT -------------------------
    # Initialize the repo
    print("==> Creating repo..")
    # Create the repo object if it wasn't passed in; comment out if the repo
    # has issues
    if this_repo is None:
        this_repo = exprepo.ExperimentRepo(
            save_in_local_json=save_in_local_json,
            json_filename=experiment_json_fname,
            local_dir_name=local_json_dir_name,
            root_dir=save_root_path)

    # Create a new experiment
    if this_repo is not None:
        exp_id = this_repo.create_new_experiment(config.data.dataset_name,
                                                 model_family, worstofk,
                                                 attack_style, attack_limits,
                                                 lambda_core, num_grouped_ids,
                                                 group_size, config_dict_copy)

    # Setting up the Tensorboard and checkpoint outputs
    model_dir = '%s/logdir/%s' % (save_root_path, exp_id)

    # We add each error metric twice so we can easily make three types of
    # comparisons in Tensorboard:
    # - train vs eval (for a single run)
    # - train of different runs
    # - eval of different runs
    saver = tf.train.Saver(max_to_keep=3)
    tf.summary.scalar('regression loss function value', model.reg_loss,
                      collections=['err'])
    tf.summary.scalar('avg_abs_err_x', model.avg_abs_err_transX,
                      collections=['err'])
    tf.summary.scalar('avg_abs_err_y', model.avg_abs_err_transY,
                      collections=['err'])
    tf.summary.scalar('avg_abs_err_rot', model.avg_abs_err_rot,
                      collections=['err'])
    tf.summary.scalar('avg_rel_err_x', model.avg_rel_err_transX,
                      collections=['err'])
    tf.summary.scalar('avg_rel_err_y', model.avg_rel_err_transY,
                      collections=['err'])
    tf.summary.scalar('avg_rel_err_rot', model.avg_rel_err_rot,
                      collections=['err'])
    tf.summary.scalar('learning_rate', learning_rate, collections=['err'])
    tf.summary.image('before_reflect_padding', model.before_reflect_x,
                     collections=['err'])
    tf.summary.image('after_reflect_padding', model.reflect_x,
                     collections=['err'])
    err_summaries = tf.summary.merge_all('err')

    tf.summary.scalar('full_batch_avg_abs_err_x', model.avg_abs_err_transX,
                      collections=['eval'])
    tf.summary.scalar('full_batch_avg_abs_err_y', model.avg_abs_err_transY,
                      collections=['eval'])
    tf.summary.scalar('full_batch_avg_abs_err_rot', model.avg_abs_err_rot,
                      collections=['eval'])
    tf.summary.scalar('full_batch_avg_rel_err_x', model.avg_rel_err_transX,
                      collections=['eval'])
    tf.summary.scalar('full_batch_avg_rel_err_y', model.avg_rel_err_transY,
                      collections=['eval'])
    tf.summary.scalar('full_batch_avg_rel_err_rot', model.avg_rel_err_rot,
                      collections=['eval'])
    tf.summary.scalar('full_batch_avg_worst', model.avg_worst_err,
                      collections=['eval'])
    # tf.summary.scalar('learning_rate', learning_rate, collections=['eval'])
    eval_summaries = tf.summary.merge_all('eval')

    # data augmentation used if config.training.data_augmentation_core is True
    x_input_placeholder = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])
    flipped = tf.map_fn(lambda img: tf.image.random_flip_left_right(img),
                        x_input_placeholder)

    with tf.Session() as sess:
        # initialize standard data augmentation
        if config.training.data_augmentation:
            if config.data.dataset_name == "cifar-10":
                data_iterator = cifar10_input.AugmentedCIFAR10Data(
                    raw_iterator, sess)
            elif config.data.dataset_name == "cifar-100":
                data_iterator = cifar100_input.AugmentedCIFAR100Data(
                    raw_iterator, sess)
            elif config.data.dataset_name == "svhn":
                data_iterator = svhn_input.AugmentedSVHNData(
                    raw_iterator, sess)
            else:
                raise ValueError("Unknown dataset name.")
        else:
            data_iterator = raw_iterator

        if simple_train:
            # attack.simple_train_perturb should return a list of parameters
            # of shape (len(x_batch), 3)
            x_batch_eval = data_iterator.eval_data.xs
            # the evaluation batch labels are the 3-dim transformations now
            y_batch_eval = data_iterator.eval_data.ys
            # we pass the label values to the model as the transformation
            trans_eval = y_batch_eval
            eval_dict = {model.x_input: x_batch_eval,
                         model.y_input: y_batch_eval,
                         # group is not used in simple train
                         model.group: np.arange(0, batch_size, 1,
                                                dtype="int32"),
                         model.transform: trans_eval,
                         model.is_training: False}
        else:
            eval_dict = {model.x_input: data_iterator.eval_data.xs,
                         model.y_input: data_iterator.eval_data.ys,
                         model.group: np.arange(0, batch_size, 1,
                                                dtype="int32"),
                         model.transform: np.zeros(
                             [data_iterator.eval_data.n, 3]),
                         model.is_training: False}
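        # Layout note (inferred from the transX/transY/rot metrics above, not
        # stated explicitly in this file): each row of model.transform holds
        # three spatial parameters, presumably [translation_x, translation_y,
        # rotation], so a batch of n untransformed images uses
        # np.zeros([n, 3]).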
        # Initialize the summary writer, global variables, and our time
        # counter.
        summary_writer = tf.summary.FileWriter(model_dir, sess.graph)
        # if eval_during_training:
        eval_dir = os.path.join(model_dir, 'eval')
        os.makedirs(eval_dir, exist_ok=True)
        # eval_summary_writer = tf.summary.FileWriter(eval_dir)
        sess.run(tf.global_variables_initializer())

        training_time = 0.0
        run_time_without_eval = 0.0
        run_time_adv_ex_creation = 0.0
        run_time_train_step = 0.0

        ####################################
        # Main training loop
        ####################################
        start_time = time.time()
        no_epochs_done = 0  # the same as epoch_count, need to merge
        start_epoch = timer()
        it_count = 0
        epoch_count = 0
        acc_sum = 0
        printFlag = 0

        for ii in range(max_num_training_steps + 1):
            # original batch
            x_batch, y_batch, epoch_done = \
                data_iterator.train_data.get_next_batch(
                    num_ids, multiple_passes=True)
            no_epochs_done += epoch_done
            # no-op transformations
            noop_trans = np.zeros([len(x_batch), 3])
            # id_batch starts with the IDs of the original examples
            id_batch = np.arange(0, num_ids, 1, dtype="int32")

            if use_core:
                print("*********Warning: should not be using core in "
                      "train_reg.py!*********\n")
            else:
                if adversarial_training:
                    start = timer()
                    if simple_train:
                        # only generate 1 tilted image per original image,
                        # which is equivalent to worst-of-1
                        x_batch_inp = x_batch
                        # use the labels as the transformation; the model
                        # should learn to predict it
                        trans_inp = y_batch
                    else:
                        print("shouldn't be entering here in the regression "
                              "task!\n")
                        quit()
                        # unreachable after quit(); kept from the
                        # classification code path
                        x_batch_inp, trans_inp = attack.perturb(
                            x_batch, y_batch, sess)
                    end = timer()
                    training_time += end - start
                    run_time_without_eval += end - start
                    run_time_adv_ex_creation += end - start
                else:
                    x_batch_inp, trans_inp = x_batch, noop_trans

            # if simple_train:
            #     y_batch_inp = y_batch
            #     y_batch_adv = transform_parameters
            #     trans_adv = transform_parameters
            #     x_batch_adv = x_batch_inp
            #     id_batch_inp = id_batch
            #     id_batch_adv = id_batch
            # for adversarial training and plain training, the following
            # variables coincide
            # else:
            y_batch_inp = y_batch
            y_batch_adv = y_batch
            trans_adv = trans_inp
            x_batch_adv = x_batch_inp
            id_batch_inp = id_batch
            id_batch_adv = id_batch

            # feed_dict for the training step
            inp_dict = {model.x_input: x_batch_inp,
                        model.y_input: y_batch_inp,
                        model.group: id_batch_inp,
                        model.transform: trans_inp,
                        model.is_training: False}

            # separate natural and adversarially transformed examples for eval
            # nat_dict = {model.x_input: x_batch,
            #             model.y_input: y_batch,
            #             model.group: id_batch,
            #             model.transform: noop_trans,
            #             model.is_training: False}
            #
            # adv_dict = {model.x_input: x_batch_adv,
            #             model.y_input: y_batch_adv,
            #             model.group: id_batch_adv,
            #             model.transform: trans_adv,
            #             model.is_training: False}

            loss = sess.run(model.reg_loss, feed_dict=inp_dict)

            if ii % num_easyeval_steps == 0 or ii == max_num_training_steps:
                print("\nin resnet_reg")
                print("y_input")
                y_input = sess.run(model.y_input, feed_dict=inp_dict)
                print(y_input[0:5])
                print("\nprediction")
                prediction = sess.run(model.prediction, feed_dict=inp_dict)
                print(prediction[0:5])

                print(' Easy Evaluation Step: error on the Training Set ')
                print('avg_abs_err_transX : {}'.format(
                    sess.run(model.avg_abs_err_transX, feed_dict=inp_dict)))
                print('avg_abs_err_transY : {}'.format(
                    sess.run(model.avg_abs_err_transY, feed_dict=inp_dict)))
                print('avg_abs_err_rot : {}'.format(
                    sess.run(model.avg_abs_err_rot, feed_dict=inp_dict)))
                print('avg_rel_err_transX : {}'.format(
                    sess.run(model.avg_rel_err_transX, feed_dict=inp_dict)))
                print('avg_rel_err_transY : {}'.format(
                    sess.run(model.avg_rel_err_transY, feed_dict=inp_dict)))
                print('avg_rel_err_rot : {}'.format(
                    sess.run(model.avg_rel_err_rot, feed_dict=inp_dict)))

                print(' Easy Evaluation Step: error on the Evaluation Set ')
                print('avg_abs_err_transX : {}'.format(
                    sess.run(model.avg_abs_err_transX, feed_dict=eval_dict)))
                print('avg_abs_err_transY : {}'.format(
                    sess.run(model.avg_abs_err_transY, feed_dict=eval_dict)))
                print('avg_abs_err_rot : {}'.format(
                    sess.run(model.avg_abs_err_rot, feed_dict=eval_dict)))
                print('avg_rel_err_transX : {}'.format(
                    sess.run(model.avg_rel_err_transX, feed_dict=eval_dict)))
                print('avg_rel_err_transY : {}'.format(
                    sess.run(model.avg_rel_err_transY, feed_dict=eval_dict)))
                print('avg_rel_err_rot : {}'.format(
                    sess.run(model.avg_rel_err_rot, feed_dict=eval_dict)))

            # Output to stdout
            if epoch_done:
                epoch_time = timer() - start_epoch
                # ToDo: log this to file as well
                # Training loss over the epoch
                print('Epoch {}: ({})'.format(epoch_count, datetime.now()))
                print('    training loss {:.4}'.format(loss))
                print('    {:.4} seconds per epoch'.format(epoch_time))

                print(' training-time error on the Training Set ')
                print('avg_abs_err_transX : {}'.format(
                    sess.run(model.avg_abs_err_transX, feed_dict=inp_dict)))
                print('avg_abs_err_transY : {}'.format(
                    sess.run(model.avg_abs_err_transY, feed_dict=inp_dict)))
                print('avg_abs_err_rot : {}'.format(
                    sess.run(model.avg_abs_err_rot, feed_dict=inp_dict)))
                print('avg_rel_err_transX : {}'.format(
                    sess.run(model.avg_rel_err_transX, feed_dict=inp_dict)))
                print('avg_rel_err_transY : {}'.format(
                    sess.run(model.avg_rel_err_transY, feed_dict=inp_dict)))
                print('avg_rel_err_rot : {}'.format(
                    sess.run(model.avg_rel_err_rot, feed_dict=inp_dict)))

                # if ii % config.eval.full_batch_eval_steps == 0:
                #     print_eval_fullbatch(ii, sess, y_batch_eval, batch_size,
                #                          trans_eval, model, attack,
                #                          model_dir, global_step,
                #                          summary_writer)

                epoch_done = 0
                epoch_count += 1
                start_epoch = timer()
                it_count = 1
            else:
                it_count += 1
                # acc_sum += nat_acc_tr

            # Output to stdout
            if ii % num_output_steps == 0:
                if ii != 0:
                    training_time = 0.0

            # Tensorboard summaries and heavy checkpoints
            if ii % num_summary_steps == 0:
                summary = sess.run(err_summaries, feed_dict=inp_dict)
                summary_writer.add_summary(summary, global_step.eval(sess))

            # Write a checkpoint and eval if it's time
            if ii % num_checkpoint_steps == 0 or ii == max_num_training_steps:
                # Save checkpoint data (weights)
                saver.save(sess, os.path.join(model_dir, 'checkpoint'),
                           global_step=global_step)
                full_eval_dict = fetch_full_eval_dict(model, batch_size)
                print_eval_fullbatch(ii, sess, model, model_dir,
                                     full_eval_dict)
                summary_eval = sess.run(eval_summaries,
                                        feed_dict=full_eval_dict)
                summary_writer.add_summary(summary_eval,
                                           global_step.eval(sess))

            # Evaluation on the full evaluation batch
            # if ((eval_during_training and ii % num_eval_steps == 0
            #         and ii > 0 and config.attack.use_spatial) or
            #         (eval_during_training and ii == max_num_training_steps
            #          and config.attack.use_spatial)):
            #     print_eval_fullbatch(ii, sess, y_batch_eval, batch_size,
            #                          trans_eval, model, attack, model_dir,
            #                          global_step, summary_writer)

            # Actual training step
            start = timer()
            inp_dict[model.is_training] = True
            sess.run(train_step, feed_dict=inp_dict)
            end = timer()
            training_time += end - start
            run_time_without_eval += end - start
            run_time_train_step += end - start

        runtime = time.time() - start_time
        # this_repo.mark_experiment_as_completed(
        #     runtime=runtime, runtime_wo_eval=run_time_without_eval,
        #     runtime_train_step=run_time_train_step,
        #     runtime_adv_ex_creation=run_time_adv_ex_creation)

    return 0
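# Launch sketch (hedged): an illustrative direct call; the config path is the
# default above, seed=0 is an arbitrary example. In the repo this function is
# normally driven by run_jobs_ray.py rather than called directly.
#
#     train(config='configs/cifar10_regression.json', seed=0,
#           save_in_local_json=True)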
def main():
    # parse args and get configs
    args = parser.parse_args()
    logging.set_verbosity(logging.INFO)
    random.seed(args.seed)

    # reload the model from a checkpoint, or train from scratch
    if args.reload_ckpt != "None":
        checkpoint_path = os.path.join(local_settings.MODEL_PATH,
                                       args.all_checkpoints_folder)
        checkpoint_folders = os.listdir(checkpoint_path)
        checkpoint_folder = [f for f in checkpoint_folders
                             if args.reload_ckpt in f]
        if len(checkpoint_folder) == 0:
            raise Exception("No matching folder found.")
        elif len(checkpoint_folder) > 1:
            logging.info(checkpoint_folder)
            raise Exception("More than one matching folder found.")
        else:
            checkpoint_folder = checkpoint_folder[0]
            logging.info("Restoring from {}".format(checkpoint_folder))
        checkpoint_dir = os.path.join(checkpoint_path, checkpoint_folder)
        if not args.overwrite_configs:
            # reload configs from file
            with open(os.path.join(checkpoint_dir, "hparams.pkl"), 'rb') as f:
                config_dict = pickle.load(f)
        else:
            # get configs
            config_dict = util.get_config(args.config)
            config_dict = util.update_config(config_dict, args)
    else:
        # get configs
        config_dict = util.get_config(args.config)
        config_dict = util.update_config(config_dict, args)

    config_dict_copy = copy.deepcopy(config_dict)
    config = util.config_to_namedtuple(config_dict)

    # Initialize the repo
    logging.info("==> Creating repo..")
    exp_repo = repo.ExperimentRepo(local_dir_name=config.local_json_dir_name,
                                   root_dir=local_settings.MODEL_PATH)

    if args.reload_ckpt != "None":
        exp_id = config_dict["id"]
    else:
        exp_id = None

    # Create a new experiment
    exp_id = exp_repo.create_new_experiment(config.dataset,
                                            config_dict_copy, exp_id)
    config_dict_copy["id"] = exp_id

    # Set up the model directory
    current_time = datetime.datetime.now().strftime(r"%y%m%d_%H%M")
    ckpt_dir_name = args.all_checkpoints_folder if not DEBUG \
        else 'checkpoints_tmp'
    ckpt_dir = os.path.join(local_settings.MODEL_PATH, ckpt_dir_name)
    os.makedirs(ckpt_dir, exist_ok=True)

    if args.reload_ckpt != "None":
        model_dir = checkpoint_dir
    else:
        model_dir = os.path.join(ckpt_dir,
                                 "ckpt_{}_{}".format(current_time, exp_id))

    # Save hyperparameter settings
    os.makedirs(model_dir, exist_ok=True)
    if not os.path.exists(os.path.join(model_dir, "hparams.json")):
        with open(os.path.join(model_dir, "hparams.json"), 'w') as f:
            json.dump(config_dict_copy, f, indent=2, sort_keys=True)
        with open(os.path.join(model_dir, "hparams.pkl"), 'wb') as f:
            pickle.dump(config_dict_copy, f)

    # Set up the optimizer
    # learning_rate = tf.keras.optimizers.schedules.ExponentialDecay(
    #     config.learning_rate, config.decay_every,
    #     config.decay_base, staircase=True)
    # learning rate = 0.02 in the paper
    optimizer = tf.keras.optimizers.RMSprop(learning_rate=0.02)

    if args.reload_ckpt != "None":
        # TODO: fix this hack
        epoch_start = int(
            sorted([f for f in os.listdir(checkpoint_dir)
                    if 'ckpt-' in f])[-1].split('ckpt-')[1].split('.')[0])
        init_gs = 0
    else:
        epoch_start = 0
        init_gs = 0

    global_step = tf.Variable(initial_value=init_gs, name="global_step",
                              trainable=False, dtype=tf.int64)

    # Get models
    model_domain = get_model(config.name_classifier_domain, config)
    model_label = get_model(config.name_classifier_label, config)

    # Get datasets
    if DEBUG:
        num_batches = 5
    else:
        num_batches = None

    ds_train1 = _get_dataset(
        config.dataset, model_label,
        split=tfds.Split.TRAIN.subsplit(tfds.percent[:50]),
        batch_size=tf.cast(config.batch_size / 2, tf.int64),
        num_batches=num_batches, domain=tf.constant(0), e=0.2)
    ds_train2 = _get_dataset(
        config.dataset, model_label,
        split=tfds.Split.TRAIN.subsplit(tfds.percent[-50:]),
        batch_size=tf.cast(config.batch_size / 2, tf.int64),
        num_batches=num_batches, domain=tf.constant(1), e=0.1)
    ds_val = _get_dataset(
        config.dataset, model_label,
        split=tfds.Split.TEST,
        batch_size=config.batch_size,
        num_batches=num_batches, domain=tf.constant(2), e=0.9)
    # TODO: add test set - done

    # Build the model once on a sample batch so the variables exist
    show_inputs = iter(ds_train1)
    _ = model_label(next(show_inputs)["image"])

    # Set up checkpointing
    if args.reload_ckpt != "None":
        ckpt = tf.train.Checkpoint(model=model_label, global_step=global_step)
        manager = tf.train.CheckpointManager(ckpt, checkpoint_dir,
                                             max_to_keep=3)
        status = ckpt.restore(manager.latest_checkpoint)
        status.assert_consumed()
    else:
        ckpt = tf.train.Checkpoint(model=model_label, global_step=global_step)
        manager = tf.train.CheckpointManager(ckpt, model_dir, max_to_keep=3)

    writer = tf.summary.create_file_writer(manager._directory)
    with writer.as_default(), tf.summary.record_if(
            lambda: int(global_step.numpy()) % 100 == 0):

        for epoch in range(epoch_start, config.num_epochs):
            start_time = time.time()

            # random = np.array([0, 1, 2])
            # np.random.shuffle(random)
            # rand_inputs = [ds_train1, ds_train2, ds_train3]

            train_one_epoch(model_domain=model_domain,
                            model_label=model_label,
                            train_input1=ds_train1,
                            train_input2=ds_train2,
                            optimizer=optimizer,
                            global_step=global_step,
                            config=config)

            train1_metr = eval_one_epoch(
                model_label=model_label, dataset=ds_train1,
                summary_directory=os.path.join(manager._directory, "train1"),
                global_step=global_step, config=config, training=False)
            train2_metr = eval_one_epoch(
                model_label=model_label, dataset=ds_train2,
                summary_directory=os.path.join(manager._directory, "train2"),
                global_step=global_step, config=config, training=False)
            val_metr = eval_one_epoch(
                model_label=model_label, dataset=ds_val,
                summary_directory=os.path.join(manager._directory, "val"),
                global_step=global_step, config=config, training=False)

            # if epoch == (config.num_epochs - 1):
            #     # full training set
            #     train_metr = eval_one_epoch(
            #         model_classifier=model_classifier,
            #         dataset=ds_train_complete,
            #         summary_directory=os.path.join(manager._directory,
            #                                        "train"),
            #         global_step=global_step, config=config, training=False)
            #     # full test_out set
            #     test_out_metr = eval_one_epoch(
            #         model_classifier=model_classifier, dataset=ds_val_out,
            #         summary_directory=os.path.join(manager._directory,
            #                                        "val_out"),
            #         global_step=global_step, config=config, training=False)
            #     # full test_in set
            #     test_in_metr = eval_one_epoch(
            #         model_classifier=model_classifier, dataset=ds_val_in,
            #         summary_directory=os.path.join(manager._directory,
            #                                        "val_in"),
            #         global_step=global_step, config=config, training=False)

            manager.save()

            logging.info("\n #### \n epoch: %d, time: %0.2f"
                         % (epoch, time.time() - start_time))
            logging.info("Global step: {}".format(global_step.numpy()))
            logging.info("train1_accuracy: {:2f}, train1_loss: {:4f}".format(
                train1_metr['accuracy'], train1_metr['loss']))
            logging.info("train2_accuracy: {:2f}, train2_loss: {:4f}".format(
                train2_metr['accuracy'], train2_metr['loss']))
            logging.info("val_accuracy: {:2f}, val_loss: {:4f}".format(
                val_metr['accuracy'], val_metr['loss']))

            if epoch == epoch_start:
                dir_path = os.path.dirname(os.path.realpath(__file__))
                copy_source(dir_path, manager._directory)

    # Mark the experiment as completed
    # TODO: add other metrics - done
    exp_repo.mark_experiment_as_completed(
        exp_id,
        train1_accuracy=train1_metr['accuracy'],
        train2_accuracy=train2_metr['accuracy'],
        val_accuracy=val_metr['accuracy'])
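# Standard script entry point (an assumption: the original excerpt does not
# show one, but main() reads flags via parser.parse_args(), so the module is
# clearly meant to be run as a script).
if __name__ == '__main__':
    main()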
def train(config='configs/fannyconfig.json',
          save_root_path='/cluster/work/math/fanyang-broglil/CoreRepo',
          experiment_json_fname='experiments.json',
          local_json_dir_name='local_json_files',
          worstofk=None,
          attack_style=None,
          attack_limits=None,
          fo_epsilon=None,
          fo_step_size=None,
          fo_num_steps=None,
          lambda_core=None,
          num_ids=None,
          group_size=None,
          use_core=None,
          seed=None,
          save_in_local_json=True,
          this_repo=None):

    # reset default graph (needed for running locally with run_jobs_ray.py)
    tf.reset_default_graph()

    # get configs
    config_dict = utilities.get_config(config)
    config_dict_copy = copy.deepcopy(config_dict)
    config = utilities.config_to_namedtuple(config_dict)

    # seeding randomness
    if seed is None:
        seed = config.training.tf_random_seed
    else:
        config_dict_copy['training']['tf_random_seed'] = seed
    tf.set_random_seed(seed)
    np.random.seed(seed)

    # Setting up training parameters
    max_num_training_steps = config.training.max_num_training_steps
    step_size_schedule = config.training.step_size_schedule
    weight_decay = config.training.weight_decay
    momentum = config.training.momentum

    if group_size is None:
        group_size = config.training.group_size
    else:
        config_dict_copy['training']['group_size'] = int(group_size)
    if num_ids is None:
        num_ids = config.training.num_ids
    else:
        config_dict_copy['training']['num_ids'] = int(num_ids)
    if lambda_core is None:
        lambda_core = config.training.lambda_
    else:
        config_dict_copy['training']['lambda_'] = float(lambda_core)
    if use_core is None:
        use_core = config.training.use_core
    else:
        config_dict_copy['training']['use_core'] = use_core

    batch_size = config.training.batch_size
    # number of groups with group size > 1
    num_grouped_ids = batch_size - num_ids
    # the number of unique ids needs to be at least batch_size / group_size
    # so that the full batch can be filled up
    assert num_ids >= batch_size / group_size
    # currently, the code is designed for groups of size 2
    # assert batch_size % group_size == 0

    adversarial_training = config.training.adversarial_training
    eval_during_training = config.training.eval_during_training
    if eval_during_training:
        num_eval_steps = config.training.num_eval_steps

    # Setting up output parameters
    num_output_steps = config.training.num_output_steps
    num_summary_steps = config.training.num_summary_steps
    num_checkpoint_steps = config.training.num_checkpoint_steps
    num_easyeval_steps = config.training.num_easyeval_steps

    # Setting up the data and the model
    data_path = config.data.data_path
    if config.data.dataset_name == "cifar-10":
        raw_iterator = cifar10_input.CIFAR10Data(data_path)
    elif config.data.dataset_name == "cifar-100":
        raw_iterator = cifar100_input.CIFAR100Data(data_path)
    elif config.data.dataset_name == "svhn":
        raw_iterator = svhn_input.SVHNData(data_path)
    else:
        raise ValueError("Unknown dataset name.")

    global_step = tf.train.get_or_create_global_step()
    model_family = config.model.model_family
    if model_family == "resnet":
        if config.attack.use_spatial and config.attack.spatial_method == 'fo':
            diffable = True
        else:
            diffable = False
        model = resnet.Model(config.model, num_ids, diffable)
    elif model_family == "vgg":
        if config.attack.use_spatial and config.attack.spatial_method == 'fo':
            # TODO: add a differentiable transformer to vgg.py
            raise NotImplementedError
        model = vgg.Model(config.model, num_ids)

    # uncomment to get a list of trainable variables
    # model_vars = tf.trainable_variables()

    # Setting up the optimizer
    boundaries = [int(sss[0]) for sss in step_size_schedule]
    boundaries = boundaries[1:]
    values = [sss[1] for sss in step_size_schedule]
    learning_rate = tf.train.piecewise_constant(
        tf.cast(global_step, tf.int32), boundaries, values)

    if use_core and lambda_core > 0:
        total_loss = (model.mean_xent +
                      weight_decay * model.weight_decay_loss +
                      lambda_core * model.core_loss)
    else:
        total_loss = model.mean_xent + weight_decay * model.weight_decay_loss

    optimizer = tf.train.MomentumOptimizer(learning_rate, momentum)
    train_step = optimizer.minimize(total_loss, global_step=global_step)

    # Set up the adversary
    if worstofk is None:
        worstofk = config.attack.random_tries
    else:
        config_dict_copy['attack']['random_tries'] = worstofk
    if fo_epsilon is None:
        fo_epsilon = config.attack.epsilon
    else:
        config_dict_copy['attack']['epsilon'] = fo_epsilon
    if fo_step_size is None:
        fo_step_size = config.attack.step_size
    else:
        config_dict_copy['attack']['step_size'] = fo_step_size
    if fo_num_steps is None:
        fo_num_steps = config.attack.num_steps
    else:
        config_dict_copy['attack']['num_steps'] = fo_num_steps

    # @ Luzius: incorporate being able to choose multiple transformations
    if attack_style is None:
        attack_style = 'rotate'

    # Training attack
    # L-inf attack if use_spatial is False and use_linf is True
    # spatial attack if use_spatial is True and use_linf is False
    # spatial random attack if spatial_method is 'random'
    # spatial PGD attack if spatial_method is 'fo'
    attack = SpatialAttack(model, config.attack, config.attack.spatial_method,
                           worstofk, attack_limits, fo_epsilon, fo_step_size,
                           fo_num_steps)

    # Different eval attacks
    # Random attack
    # L-inf attack if use_spatial is False and use_linf is True
    # random (worst-of-1) spatial attack if use_spatial is True
    # and use_linf is False
    attack_eval_random = SpatialAttack(model, config.attack, 'random', 1,
                                       attack_limits, fo_epsilon,
                                       fo_step_size, fo_num_steps)
    # First-order attack
    # L-inf attack if use_spatial is False and use_linf is True
    # first-order spatial attack if use_spatial is True and use_linf is False
    attack_eval_fo = SpatialAttack(model, config.attack, 'fo', 1,
                                   attack_limits, fo_epsilon, fo_step_size,
                                   fo_num_steps)
    # Grid attack
    # spatial attack if use_spatial is True and use_linf is False
    # not executed for L-inf attacks
    attack_eval_grid = SpatialAttack(model, config.attack, 'grid', None,
                                     attack_limits)

    # TODO(christina): add L-inf attack with random restarts

    # ------------------ START EXPERIMENT -------------------------
    # Initialize the repo
    print("==> Creating repo..")
    # Create the repo object if it wasn't passed in; comment out if the repo
    # has issues
    if this_repo is None:
        this_repo = exprepo.ExperimentRepo(
            save_in_local_json=save_in_local_json,
            json_filename=experiment_json_fname,
            local_dir_name=local_json_dir_name,
            root_dir=save_root_path)

    # Create a new experiment
    if this_repo is not None:
        exp_id = this_repo.create_new_experiment(config.data.dataset_name,
                                                 model_family, worstofk,
                                                 attack_style, attack_limits,
                                                 lambda_core, num_grouped_ids,
                                                 group_size, config_dict_copy)

    # Setting up the Tensorboard and checkpoint outputs
    model_dir = '%s/logdir/%s' % (save_root_path, exp_id)

    # We add accuracy and xent twice so we can easily make three types of
    # comparisons in Tensorboard:
    # - train vs eval (for a single run)
    # - train of different runs
    # - eval of different runs
    saver = tf.train.Saver(max_to_keep=3)
    tf.summary.scalar('accuracy_nat_train', model.accuracy,
                      collections=['nat'])
    tf.summary.scalar('accuracy_nat', model.accuracy, collections=['nat'])
    tf.summary.scalar('xent_nat_train', model.xent / batch_size,
                      collections=['nat'])
    tf.summary.scalar('xent_nat', model.xent / batch_size,
                      collections=['nat'])
    tf.summary.image('images_nat_train', model.x_image, collections=['nat'])
    tf.summary.scalar('learning_rate', learning_rate, collections=['nat'])
    nat_summaries = tf.summary.merge_all('nat')

    # data augmentation used if config.training.data_augmentation_core is True
    x_input_placeholder = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])
    flipped = tf.map_fn(lambda img: tf.image.random_flip_left_right(img),
                        x_input_placeholder)

    with tf.Session() as sess:
        # initialize standard data augmentation
        if config.training.data_augmentation:
            if config.data.dataset_name == "cifar-10":
                data_iterator = cifar10_input.AugmentedCIFAR10Data(
                    raw_iterator, sess)
            elif config.data.dataset_name == "cifar-100":
                data_iterator = cifar100_input.AugmentedCIFAR100Data(
                    raw_iterator, sess)
            elif config.data.dataset_name == "svhn":
                data_iterator = svhn_input.AugmentedSVHNData(
                    raw_iterator, sess)
            else:
                raise ValueError("Unknown dataset name.")
        else:
            data_iterator = raw_iterator

        eval_dict = {model.x_input: data_iterator.eval_data.xs,
                     model.y_input: data_iterator.eval_data.ys,
                     model.group: np.arange(0, batch_size, 1, dtype="int32"),
                     model.transform: np.zeros(
                         [data_iterator.eval_data.n, 3]),
                     model.is_training: False}

        # Initialize the summary writer, global variables, and our time
        # counter.
        summary_writer = tf.summary.FileWriter(model_dir, sess.graph)
        # if eval_during_training:
        eval_dir = os.path.join(model_dir, 'eval')
        os.makedirs(eval_dir, exist_ok=True)
        eval_summary_writer = tf.summary.FileWriter(eval_dir)
        sess.run(tf.global_variables_initializer())

        training_time = 0.0
        run_time_without_eval = 0.0
        run_time_adv_ex_creation = 0.0
        run_time_train_step = 0.0

        ####################################
        # Main training loop
        ####################################
        start_time = time.time()
        no_epochs_done = 0  # the same as epoch_count, need to merge
        start_epoch = timer()
        it_count = 0
        epoch_count = 0
        acc_sum = 0

        for ii in range(max_num_training_steps + 1):
            # original batch
            x_batch, y_batch, epoch_done = \
                data_iterator.train_data.get_next_batch(
                    num_ids, multiple_passes=True)
            no_epochs_done += epoch_done
            # no-op transformations
            noop_trans = np.zeros([len(x_batch), 3])
            # id_batch starts with the IDs of the original examples
            id_batch = np.arange(0, num_ids, 1, dtype="int32")

            if use_core:
                # the first num_ids examples of the batch are natural
                x_batch_inp = x_batch
                y_batch_inp = y_batch
                trans_inp = noop_trans
                id_batch_inp = id_batch
                start = timer()
                for _ in range(group_size - 1):
                    if config.training.data_augmentation_core:
                        raise NotImplementedError
                    # create rotated examples
                    x_batch_adv_i, trans_adv_i = attack.perturb(x_batch,
                                                                y_batch, sess)
                    # construct new batches including the rotated examples
                    x_batch_inp = np.concatenate(
                        (x_batch_inp, x_batch_adv_i), axis=0)
                    y_batch_inp = np.concatenate((y_batch_inp, y_batch),
                                                 axis=0)
                    trans_inp = np.concatenate((trans_inp, trans_adv_i),
                                               axis=0)
                    id_batch_inp = np.concatenate((id_batch_inp, id_batch),
                                                  axis=0)
                end = timer()
                training_time += end - start
                run_time_without_eval += end - start
                run_time_adv_ex_creation += end - start

                trans_adv = trans_inp[num_ids:, ...]
                id_batch_adv = id_batch_inp[num_ids:]
                y_batch_adv = y_batch_inp[num_ids:]
                x_batch_adv = x_batch_inp[num_ids:, ...]
            else:
                if adversarial_training:
                    start = timer()
                    x_batch_inp, trans_inp = attack.perturb(x_batch, y_batch,
                                                            sess)
                    end = timer()
                    training_time += end - start
                    run_time_without_eval += end - start
                    run_time_adv_ex_creation += end - start
                else:
                    x_batch_inp, trans_inp = x_batch, noop_trans

                # for adversarial training and plain training, the following
                # variables coincide
                y_batch_inp = y_batch
                y_batch_adv = y_batch
                trans_adv = trans_inp
                x_batch_adv = x_batch_inp
                id_batch_inp = id_batch
                id_batch_adv = id_batch

            # feed_dict for the training step
            inp_dict = {model.x_input: x_batch_inp,
                        model.y_input: y_batch_inp,
                        model.group: id_batch_inp,
                        model.transform: trans_inp,
                        model.is_training: False}

            # separate natural and adversarially transformed examples for eval
            nat_dict = {model.x_input: x_batch,
                        model.y_input: y_batch,
                        model.group: id_batch,
                        model.transform: noop_trans,
                        model.is_training: False}

            adv_dict = {model.x_input: x_batch_adv,
                        model.y_input: y_batch_adv,
                        model.group: id_batch_adv,
                        model.transform: trans_adv,
                        model.is_training: False}

            ########## Outputting/saving weights and evaluations ##########
            acc_grid_te = -1.0
            avg_xent_grid_te = -1.0
            acc_fo_te = -1.0
            avg_xent_fo_te = -1.0
            saved_weights = 0

            # Compute training accuracy on this minibatch
            nat_acc_tr = 100 * sess.run(model.accuracy, feed_dict=nat_dict)

            # Output to stdout
            if epoch_done:
                epoch_time = timer() - start_epoch
                # Average training accuracy over the epoch
                av_acc = acc_sum / it_count
                # ToDo: log this to file as well
                print('Epoch {}: ({})'.format(epoch_count, datetime.now()))
                print('    training natural accuracy {:.4}%'.format(av_acc))
                print('    {:.4} seconds per epoch'.format(epoch_time))

                # Accuracy on the entire test set
                nat_acc_te = 100 * sess.run(model.accuracy,
                                            feed_dict=eval_dict)
                print('    test set natural accuracy {:.4}%'.format(
                    nat_acc_te))

                # Set the accuracy sum and iteration count back to zero
                acc_sum = nat_acc_tr
                epoch_done = 0
                epoch_count += 1
                start_epoch = timer()
                it_count = 1
            else:
                it_count += 1
                acc_sum += nat_acc_tr

            # Output to stdout
            if ii % num_output_steps == 0:
                # nat_acc_tr = 100 * sess.run(model.accuracy,
                #                             feed_dict=nat_dict)
                adv_acc_tr = 100 * sess.run(model.accuracy,
                                            feed_dict=adv_dict)
                inp_acc_tr = 100 * sess.run(model.accuracy,
                                            feed_dict=inp_dict)
                # print('Step {}: ({})'.format(ii, datetime.now()))
                # print('    training nat accuracy {:.4}%'.format(nat_acc_tr))
                # print('    training adv accuracy {:.4}%'.format(adv_acc_tr))
                # print('    training inp accuracy {:.4}%'.format(inp_acc_tr))
                if ii != 0:
                    # print('    {} examples per second'.format(
                    #     num_output_steps * batch_size / training_time))
                    training_time = 0.0

            # Tensorboard summaries and heavy checkpoints
            if ii % num_summary_steps == 0:
                summary = sess.run(nat_summaries, feed_dict=nat_dict)
                summary_writer.add_summary(summary, global_step.eval(sess))

            # Write a checkpoint if it's time
            if ii % num_checkpoint_steps == 0 or ii == max_num_training_steps:
                # Save checkpoint data (weights)
                saver.save(sess, os.path.join(model_dir, 'checkpoint'),
                           global_step=global_step)
                saved_weights = 1

            # Write evaluation metadata for the checkpoint
            if ii % num_easyeval_steps == 0 or ii == max_num_training_steps:
                # Get training accuracies
                nat_acc_tr = 100 * sess.run(model.accuracy,
                                            feed_dict=nat_dict)
                adv_acc_tr = 100 * sess.run(model.accuracy,
                                            feed_dict=adv_dict)
                inp_acc_tr = 100 * sess.run(model.accuracy,
                                            feed_dict=inp_dict)

                # Evaluation on random and natural examples
                [acc_nat_te, acc_rand_adv_te, avg_xent_nat_te,
                 avg_xent_adv_te] = evaluate(model, attack_eval_random, sess,
                                             config, 'random', data_path,
                                             None)

                # Evaluation on grid (only for spatial attacks)
                if ((eval_during_training and ii % num_eval_steps == 0
                        and ii > 0 and config.attack.use_spatial) or
                        (eval_during_training
                         and ii == max_num_training_steps
                         and config.attack.use_spatial)):
                    if (config.attack.use_spatial
                            and config.attack.spatial_method == 'fo'):
                        # Evaluation on the first-order PGD attack (too
                        # expensive to evaluate more frequently on the whole
                        # dataset)
                        [_, acc_fo_te, _, avg_xent_fo_te] = evaluate(
                            model, attack_eval_fo, sess, config, 'fo',
                            data_path, None)
                    # Evaluation on grid
                    [_, acc_grid_te, _, avg_xent_grid_te] = evaluate(
                        model, attack_eval_grid, sess, config, "grid",
                        data_path, eval_summary_writer)

                chkpt_id = this_repo.create_training_checkpoint(
                    exp_id, training_step=ii, epoch=no_epochs_done,
                    train_acc_nat=nat_acc_tr, train_acc_adv=adv_acc_tr,
                    train_acc_inp=inp_acc_tr, test_acc_nat=acc_nat_te,
                    test_acc_adv=acc_rand_adv_te, test_acc_fo=acc_fo_te,
                    test_acc_grid=acc_grid_te, test_loss_nat=avg_xent_nat_te,
                    test_loss_adv=avg_xent_adv_te,
                    test_loss_fo=avg_xent_fo_te,
                    test_loss_grid=avg_xent_grid_te)

                if saved_weights == 0:
                    # Save checkpoint data (weights)
                    saver.save(sess, os.path.join(
                        model_dir, '{}_checkpoint'.format(chkpt_id)))

            # Actual training step
            start = timer()
            inp_dict[model.is_training] = True
            sess.run(train_step, feed_dict=inp_dict)
            end = timer()
            training_time += end - start
            run_time_without_eval += end - start
            run_time_train_step += end - start

        runtime = time.time() - start_time

        this_repo.mark_experiment_as_completed(
            exp_id, train_acc_nat=nat_acc_tr, train_acc_adv=adv_acc_tr,
            train_acc_inp=inp_acc_tr, test_acc_nat=acc_nat_te,
            test_acc_adv=acc_rand_adv_te, test_acc_fo=acc_fo_te,
            test_acc_grid=acc_grid_te, runtime=runtime,
            runtime_wo_eval=run_time_without_eval,
            runtime_train_step=run_time_train_step,
            runtime_adv_ex_creation=run_time_adv_ex_creation)

    return 0
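# Launch sketch (hedged): an illustrative direct call; worstofk=10,
# lambda_core=1.0 and seed=1 are example values, not repo defaults. In the
# repo this function is normally driven by run_jobs_ray.py.
#
#     train(config='configs/fannyconfig.json', worstofk=10,
#           lambda_core=1.0, use_core=True, seed=1)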
def train(config='configs/cifar10_config_stn.json',
          save_root_path='/cluster/work/math/fanyang-broglil/CoreRepo',
          worstofk=None,
          attack_style=None,
          attack_limits=None,
          lambda_core=None,
          num_grouped_ids=None,
          num_ids=None,
          group_size=None,
          use_core=None,
          seed=None,
          this_repo=None):

    config_dict = utilities.get_config(config)
    config_dict_copy = copy.deepcopy(config_dict)
    # model_dir = config_dict['model']['output_dir']
    # if not os.path.exists(model_dir):
    #     os.makedirs(model_dir)
    # # keep the configuration file with the model for reproducibility
    # with open(os.path.join(model_dir, 'config.json'), 'w') as f:
    #     json.dump(config_dict, f, sort_keys=True, indent=4)
    config = utilities.config_to_namedtuple(config_dict)

    # seeding randomness
    if seed is None:
        seed = config.training.tf_random_seed
    else:
        config_dict_copy['training']['tf_random_seed'] = seed
    tf.set_random_seed(seed)
    np.random.seed(seed)

    # Setting up training parameters
    max_num_epochs = config.training.max_num_epochs
    step_size_schedule = config.training.step_size_schedule
    weight_decay = config.training.weight_decay
    momentum = config.training.momentum

    if group_size is None:
        group_size = config.training.group_size
    else:
        config_dict_copy['training']['group_size'] = group_size
    if num_grouped_ids is None:
        num_grouped_ids = config.training.num_grouped_ids
    else:
        config_dict_copy['training']['num_grouped_ids'] = num_grouped_ids
    # number of IDs per minibatch (an unconditional
    # `num_ids = config.training.num_ids` used to precede this check, which
    # silently discarded any num_ids passed in as an argument)
    if num_ids is None:
        num_ids = config.training.num_ids
    else:
        config_dict_copy['training']['num_ids'] = num_ids
    if lambda_core is None:
        lambda_core = config.training.lambda_
    else:
        config_dict_copy['training']['lambda_'] = lambda_core
    if use_core is None:
        use_core = config.training.use_core
    else:
        config_dict_copy['training']['use_core'] = use_core

    adversarial_training = config.training.adversarial_training
    eval_during_training = config.training.eval_during_training
    if eval_during_training:
        num_eval_steps = config.training.num_eval_steps

    # Setting up output parameters
    num_summary_steps = config.training.num_summary_steps
    num_checkpoint_steps = config.training.num_checkpoint_steps
    num_easyeval_steps = config.training.num_easyeval_steps

    # minibatch size per iteration
    # ToDo: need to make this support a variable number of num_grouped_ids
    batch_size = num_ids + num_grouped_ids

    # Setting up the model and loss
    model_family = config.model.model_family
    with_transformer = config.model.transformer
    translation_model = config.model.translation_model
    if model_family == "resnet":
        model = loc_net.Model(config.model)
    else:
        print("Model family does not exist")
        exit()

    if use_core:
        # model.mean_xent + weight_decay * model.weight_decay_loss
        #                 + lambda_core * model.core_loss2
        total_loss = model.y_loss
    else:
        # model.mean_xent + weight_decay * model.weight_decay_loss
        total_loss = model.y_loss

    # Setting up the data and the model
    data_path = config.data.data_path
    if config.data.dataset_name == "cifar-10":
        raw_cifar = cifar10_input.CIFAR10Data(data_path)
    elif config.data.dataset_name == "cifar-100":
        raw_cifar = cifar100_input.CIFAR100Data(data_path)
    else:
        raise ValueError("Unknown dataset name.")

    # uncomment to get a list of trainable variables
    # model_vars = tf.trainable_variables()
    # slim.model_analyzer.analyze_vars(model_vars, print_info=True)

    # Setting up the optimizer
    boundaries = [int(sss[0]) for sss in step_size_schedule]
    boundaries = boundaries[1:]
    values = [sss[1] for sss in step_size_schedule]
    global_step = tf.train.get_or_create_global_step()
    learning_rate = tf.train.piecewise_constant(
        tf.cast(global_step, tf.int32), boundaries, values)
    optimizer = tf.train.MomentumOptimizer(learning_rate, momentum)
    # optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate,
    #                                    name="Adam")
    train_step = optimizer.minimize(total_loss, global_step=global_step)

    # Set up the adversary
    if worstofk is None:
        worstofk = config.attack.random_tries
    else:
        config_dict_copy['attack']['random_tries'] = worstofk
    # @ Luzius: incorporate being able to choose multiple transformations
    if attack_style is None:
        attack_style = 'rotate'

    # Training attack
    attack = SpatialAttack(model, config.attack, 'random', worstofk,
                           attack_limits)
    # Different eval attacks
    # Same attack as worst-of-k
    # @ Luzius: currently the names are not clear/consistent; I wasn't sure
    # whether we actually want random or not, since you originally had your
    # attack like that, but I feel it should rather be worst-of-k?
    # attack_eval_adv = SpatialAttack(model, config.attack, 'random',
    #                                 worstofk, attack_limits)
    attack_eval_random = SpatialAttack(model, config.attack, 'random', 1,
                                       attack_limits)
    # Grid attack
    attack_eval_grid = SpatialAttack(model, config.attack, 'grid', None,
                                     attack_limits)

    # ------------------ START EXPERIMENT -------------------------
    # Initialize the repo
    print("==> Creating repo..")
    # Create the repo object if it wasn't passed in; comment out if the repo
    # has issues
    if this_repo is None:
        this_repo = exprepo.ExperimentRepo(root_dir=save_root_path)

    # Create a new experiment
    if this_repo is not None:
        exp_id = this_repo.create_new_experiment('cifar-10', model_family,
                                                 worstofk, attack_style,
                                                 attack_limits, lambda_core,
                                                 num_grouped_ids, group_size,
                                                 config_dict_copy)

    # Setting up the Tensorboard and checkpoint outputs
    model_dir = '%s/logdir/%s' % (save_root_path, exp_id)
    os.makedirs(model_dir, exist_ok=True)

    # We add accuracy and xent twice so we can easily make three types of
    # comparisons in Tensorboard:
    # - train vs eval (for a single run)
    # - train of different runs
    # - eval of different runs
    saver = tf.train.Saver(max_to_keep=3)
    tf.summary.scalar('accuracy_nat_train', model.accuracy,
                      collections=['nat'])
    tf.summary.scalar('accuracy_nat', model.accuracy, collections=['nat'])
    tf.summary.scalar('xent_nat_train', model.xent / batch_size,
                      collections=['nat'])
    tf.summary.scalar('xent_nat', model.xent / batch_size,
                      collections=['nat'])
    tf.summary.image('images_nat_train', model.x_image, collections=['nat'])
    tf.summary.scalar('learning_rate', learning_rate, collections=['nat'])
    nat_summaries = tf.summary.merge_all('nat')

    # data augmentation
    x_input_placeholder = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])
    flipped = tf.map_fn(lambda img: tf.image.random_flip_left_right(img),
                        x_input_placeholder)

    tot_samp = raw_cifar.train_data.n
    max_num_iterations = int(np.floor((tot_samp / num_ids) * max_num_epochs))
    print("Total # of samples is: %d; this experiment will run %d iterations"
          % (tot_samp, max_num_iterations))

    # Compute the (epoch) gaps at which summaries, worst-of-1 evals and
    # checkpoints should happen
    summary_gap = int(np.floor(max_num_epochs / num_summary_steps))
    easyeval_gap = int(np.floor(max_num_epochs / num_easyeval_steps))
    checkpoint_gap = int(np.floor(max_num_epochs / num_checkpoint_steps))

    with tf.Session() as sess:
        # initialize data augmentation
        if config.training.data_augmentation:
            if config.data.dataset_name == "cifar-10":
                cifar = cifar10_input.AugmentedCIFAR10Data(raw_cifar, sess)
            elif config.data.dataset_name == "cifar-100":
                cifar = cifar100_input.AugmentedCIFAR100Data(raw_cifar, sess)
            else:
                raise ValueError("Unknown dataset name.")
        else:
            cifar = raw_cifar

        cifar_eval_dict = {model.x_input: cifar.eval_data.xs,
                           model.y_input: cifar.eval_data.ys,
                           model.group: np.arange(0, batch_size, 1,
                                                  dtype="int32"),
                           model.transform: np.zeros([cifar.eval_data.n, 3]),
                           model.is_training: False}

        # Initialize the summary writer, global variables, and our time
        # counter.
        summary_writer = tf.summary.FileWriter(model_dir, sess.graph)
        # if eval_during_training:
        eval_dir = os.path.join(model_dir, 'eval')
        os.makedirs(eval_dir, exist_ok=True)
        eval_summary_writer = tf.summary.FileWriter(eval_dir)

        sess.run(tf.global_variables_initializer())
        training_time = 0.0

        ####################################
        # Main training loop
        ####################################
        # Initialize cache variables
        start_time = time.time()
        start_epoch = timer()
        it_count = 0
        epoch_count = 0
        acc_sum = 0
        it_summary = 0
        it_easyeval = 0
        it_ckpt = 0
        adv_time = 0
        train_time = 0
        chkpt_id = None  # so the `if not chkpt_id` check below is defined

        for ii in range(max_num_iterations + 1):
            x_batch, y_batch, epoch_done = cifar.train_data.get_next_batch(
                num_ids, multiple_passes=True)
            noop_trans = np.zeros([len(x_batch), 3])
            x_batch_nat = x_batch
            y_batch_nat = y_batch
            id_batch = np.arange(0, num_ids, 1, dtype="int32")

            if use_core:
                # Create rotated examples
                start = timer()
                ids = np.arange(0, num_grouped_ids, 1, dtype="int32")
                for i in range(config.training.group_size):
                    if config.training.data_augmentation_core:
                        x_batch_core = sess.run(
                            flipped,
                            feed_dict={x_input_placeholder:
                                       x_batch[0:num_grouped_ids, :, :, :]})
                    else:
                        x_batch_core = x_batch[0:num_grouped_ids, :, :, :]
                    x_batch_group, trans_group = attack.perturb(
                        x_batch_core, y_batch[0:num_grouped_ids], sess)
                    # construct new batches including the rotated examples
                    x_batch_nat = np.concatenate(
                        (x_batch_nat, x_batch_group), axis=0)
                    y_batch_nat = np.concatenate((y_batch_nat, y_batch),
                                                 axis=0)
                    noop_trans = np.concatenate((noop_trans, trans_group),
                                                axis=0)
                    id_batch = np.concatenate((id_batch, ids), axis=0)
                end = timer()
                training_time += end - start
                adv_time += end - start
            else:
                if adversarial_training:
                    start = timer()
                    x_batch_nat, noop_trans = attack.perturb(x_batch,
                                                             y_batch, sess)
                    end = timer()
                    adv_time += end - start
                else:
                    x_batch_nat, noop_trans = x_batch, noop_trans

            nat_dict = {model.x_input: x_batch_nat,
                        model.y_input: y_batch_nat,
                        model.group: id_batch,
                        model.transform: noop_trans,
                        model.is_training: False}

            ########## Outputting/saving weights and evaluations ##########
            nat_acc = -1.0
            acc_grid = -1.0
            avg_xent_grid = -1.0
            saved_weights = 0

            # Compute training accuracy on this minibatch
            train_nat_acc = sess.run(model.accuracy, feed_dict=nat_dict)

            # Output to stdout
            if epoch_done:
                epoch_time = timer() - start_epoch
                # Average training accuracy over the epoch
                av_acc = acc_sum / it_count
                # ToDo: log this to file as well
                print('Epoch {}: ({})'.format(epoch_count, datetime.now()))
                print('    training natural accuracy {:.4}%'.format(
                    av_acc * 100))
                print('    {:.4} seconds per epoch'.format(epoch_time))

                # Accuracy on the entire test set
                test_nat_acc = sess.run(model.accuracy,
                                        feed_dict=cifar_eval_dict)
                print('    test set natural accuracy {:.4}%'.format(
                    test_nat_acc * 100))
                # print('    {:.4} seconds for test evaluation'.format(
                #     test_time))
                print("example TIME")
                print(adv_time)
                print("train TIME")
                print(train_time)

                ########### Things to do every xxx epochs #############
                # Check whether a summary should be written
                if (it_summary == summary_gap - 1 or
                        epoch_count == max_num_epochs - 1):
                    summary = sess.run(nat_summaries, feed_dict=nat_dict)
                    summary_writer.add_summary(summary,
                                               global_step.eval(sess))
                    it_summary = 0
                else:
                    it_summary += 1

                # Check whether the worst-of-1 eval should be run
                if (it_easyeval == easyeval_gap - 1 or
                        epoch_count == max_num_epochs - 1):
                    # Evaluation on adversarial and natural examples
                    [acc_nat, acc_adv, avg_xent_nat, avg_xent_adv] = evaluate(
                        model, attack_eval_random, sess, config, "random",
                        data_path, None)
                    # Save in checkpoint
                    chkpt_id = this_repo.create_training_checkpoint(
                        exp_id, training_step=ii, epoch=epoch_count,
                        train_acc_nat=nat_acc, test_acc_adv=acc_adv,
                        test_acc_nat=acc_nat, test_loss_adv=avg_xent_adv,
                        test_loss_nat=avg_xent_nat)
                    it_easyeval = 0
                else:
                    it_easyeval += 1

                startt = timer()
                if (it_ckpt == checkpoint_gap - 1 or
                        epoch_count == max_num_epochs - 1):
                    # Create a checkpoint id if it doesn't exist yet
                    if not chkpt_id:
                        chkpt_id = this_repo.create_training_checkpoint(
                            exp_id, training_step=ii, epoch=epoch_count,
                            train_acc_nat=train_nat_acc,
                            test_acc_nat=test_nat_acc)
                    # Save checkpoint data (weights)
                    saver.save(sess, os.path.join(
                        model_dir, '{}_checkpoint'.format(chkpt_id)))
                    print('    chkpt saving took {:.4}s '.format(
                        timer() - startt))
                    it_ckpt = 0
                else:
                    it_ckpt += 1

                # Set the accuracy sum and iteration count back to zero
                acc_sum = train_nat_acc
                epoch_done = 0
                epoch_count += 1
                start_epoch = timer()
                it_count = 1
            else:
                it_count += 1
                acc_sum += train_nat_acc

            # Actual training step
            start = timer()
            nat_dict[model.is_training] = True
            sess.run(train_step, feed_dict=nat_dict)
            end = timer()  # take the time once, then book it twice
            training_time += end - start
            train_time += end - start

        runtime = time.time() - start_time

        # Do all evaluations in the last step - on grid
        [_, acc_grid, _, avg_xent_grid] = evaluate(
            model, attack_eval_grid, sess, config, "grid", data_path,
            eval_summary_writer)

        this_repo.mark_experiment_as_completed(
            exp_id, train_acc_nat=nat_acc, test_acc_adv=acc_adv,
            test_acc_nat=acc_nat, test_acc_grid=acc_grid, runtime=runtime)

    return 0
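# Launch sketch (hedged): an illustrative direct call; worstofk=10 and seed=7
# are example values, not repo defaults.
#
#     train(config='configs/cifar10_config_stn.json', worstofk=10, seed=7)
#
# Iteration budget, worked through: CIFAR-10 has 50,000 training samples, so
# with num_ids = 100 and max_num_epochs = 10 (illustrative values),
# max_num_iterations = floor((50000 / 100) * 10) = 5000, and the loop above
# runs ii = 0 through 5000 inclusive.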