def show_mask_norms(mask_folder, model_name='gtsrb', out_png=False):
    """Compute the L1 norm of each reversed trigger mask and a MAD-based anomaly score.

    Scans `mask_folder` for per-label checkpoint subfolders (named '<label>_...'),
    restores each checkpoint, pulls out the learned (pattern, mask) pair, records
    the mask's L1 norm, saves all norms to 'out_norms.npy', and box-plots the
    normalized deviation scores.

    Args:
        mask_folder: Root folder containing one checkpoint subfolder per target label.
        model_name: Model/dataset identifier passed to the options builder.
        out_png: When True, also write pattern/mask/color PNGs per label.

    Side effects: prints progress, writes 'out_norms.npy' (and optional PNGs) to
    the working directory, and shows a matplotlib figure.
    """
    options = Options()
    options.model_name = model_name
    options = justify_options_for_model(options, model_name)
    options.data_subset = 'validation'
    options.batch_size = 1
    options.num_epochs = 1
    options.net_mode = 'backdoor_def'
    options.load_mode = 'all'
    options.fix_level = 'all'
    options.build_level = 'mask_only'
    options.selected_training_labels = None
    options.gen_ori_label = False

    # Map target label -> latest checkpoint path, parsed from '<label>_...' folder names.
    ld_paths = dict()
    root_folder = mask_folder
    print(root_folder)
    dirs = os.listdir(root_folder)
    for d in dirs:
        tt = d.split('_')[0]
        if len(tt) == 0:
            continue
        try:
            tgt_id = int(tt)
        except ValueError:
            # Folder name does not start with a numeric label; skip it.
            # (Fix: was a bare `except:`, which also swallowed KeyboardInterrupt etc.)
            continue
        ld_paths[tgt_id] = get_last_checkpoint_in_folder(os.path.join(root_folder, d))
    print(ld_paths)

    model, dataset, input_list, feed_list, out_op, aux_out_op = get_output(
        options, model_name=model_name)
    model.add_backbone_saver()

    mask_abs = dict()
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    import cv2
    init_op = tf.global_variables_initializer()
    local_var_init_op = tf.local_variables_initializer()
    table_init_ops = tf.tables_initializer()  # iterator_initilizor in here
    with tf.Session(config=config) as sess:
        sess.run(init_op)
        sess.run(local_var_init_op)
        sess.run(table_init_ops)
        for k, v in ld_paths.items():
            print(v)
            model.load_backbone_model(sess, v)
            pattern, mask = sess.run([out_op, aux_out_op])
            pattern = (pattern[0] + 1) / 2  # rescale from [-1, 1] to [0, 1]
            mask = mask[0]
            mask_abs[k] = np.sum(np.abs(mask))  # L1 norm of the reversed mask
            if out_png:
                show_name = '%d_pattern.png' % k
                out_pattern = pattern * 255
                cv2.imwrite(show_name, out_pattern.astype(np.uint8))
                show_name = '%d_mask.png' % k
                out_mask = mask * 255
                cv2.imwrite(show_name, out_mask.astype(np.uint8))
                show_name = '%d_color.png' % k
                out_color = pattern * mask * 255
                cv2.imwrite(show_name, out_color.astype(np.uint8))

    # Pack (label, norm) pairs into an array and persist them.
    out_norms = np.zeros([len(mask_abs), 2])
    z = 0
    for k, v in mask_abs.items():
        out_norms[z][0] = k
        out_norms[z][1] = v
        z = z + 1
    print('===Results===')
    np.save('out_norms.npy', out_norms)
    print('write norms to out_norms.npy')

    # MAD-based anomaly index: |x - median| / (MAD * 1.4826), where 1.4826 is the
    # consistency constant that makes MAD estimate the std dev for normal data.
    # Fix: the original did `abs(vs - me)` / `abvs / ...` on a plain Python list,
    # which raises TypeError — the arithmetic must be done on a numpy array.
    vs = np.asarray(list(mask_abs.values()), dtype=np.float64)
    import statistics
    me = statistics.median(vs)
    abvs = np.abs(vs - me)
    mad = statistics.median(abvs)
    rvs = abvs / (mad * 1.4826)
    print(mask_abs)
    print(rvs)
    import matplotlib.pyplot as plt
    plt.figure()
    plt.boxplot(rvs)
    plt.show()
def testtest(params):
    """Debug/scratch routine: print flag values, export the CIFAR-10 training set
    to 'cifar-10.mat', then exit the process.

    NOTE(review): everything after `exit(0)` below is unreachable dead code left
    over from earlier input-pipeline experiments; it is kept verbatim.
    """
    # Dump both global FLAGS and the passed-in params for comparison.
    print(FLAGS.net_mode)
    print(FLAGS.batch_size)
    print(FLAGS.num_epochs)
    print(params.batch_size)
    print(params.num_epochs)
    options = Options()
    options.data_mode = 'normal'
    options.data_subset = 'train'
    dataset = CifarDataset(options)
    model = Model_Builder('cifar10', dataset.num_classes, options, params)
    # Export raw labels/images to a MATLAB file, then terminate the process.
    labels, images = dataset.data
    images = np.asarray(images)
    data_dict = dict()
    data_dict['labels'] = labels
    data_dict['images'] = images
    save_to_mat('cifar-10.mat', data_dict)
    exit(0)
    # ---- unreachable below this point (exit above) ----
    # Build the TF1 input pipeline and iterate batches, printing batch sizes.
    p_class = dataset.get_input_preprocessor()
    preprocessor = p_class(options.batch_size,
                           model.get_input_shapes('train'),
                           options.batch_size,
                           model.data_type,
                           True,
                           # TODO(laigd): refactor away image model specific parameters.
                           distortions=params.distortions,
                           resize_method='bilinear')
    ds = preprocessor.create_dataset(batch_size=options.batch_size,
                                     num_splits = 1,
                                     batch_size_per_split = options.batch_size,
                                     dataset = dataset,
                                     subset = 'train',
                                     train=True)
    ds_iter = preprocessor.create_iterator(ds)
    input_list = ds_iter.get_next()
    print(input_list)

    # input_list = preprocessor.minibatch(dataset, subset='train', params=params)
    # img, lb = input_list
    # lb = input_list['img_path']
    lb = input_list
    print(lb)

    b = 0
    show = False  # set when an epoch boundary is crossed; never read afterwards
    local_var_init_op = tf.local_variables_initializer()
    table_init_ops = tf.tables_initializer()  # iterator_initilizor in here
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(local_var_init_op)
        sess.run(table_init_ops)
        for i in range(330):
            print('%d: ' % i)
            # Flag the first batch and any batch that would overrun the epoch.
            if b == 0 or b+options.batch_size > dataset.num_examples_per_epoch('train'):
                show = True
            b = b+options.batch_size
            rst = sess.run(lb)
            # rst = rst.decode('utf-8')
            print(len(rst))