def load_test_data(run_data, train, backdoor):
    """Build the evaluation dataloader for a finished run.

    Re-reads the options stored in *run_data* and loads the dataset
    with shuffling disabled so evaluation order is deterministic.
    """
    options = run_data["options"]
    return load_data(options, train=train, shuffle=False, backdoor=backdoor)
def __init__(self, options, classes, controller):
    """Set up a poisoning participant and register it as a sybil.

    Args:
        options: run configuration; ``options.adversaries`` describes the
            attack — source label ``'from'``, target label ``'to'``, and
            the attack ``'type'`` string.
        classes: label subset forwarded to the parent constructor.
        controller: orchestrator that tracks sybil participants.
    """
    super().__init__(options, classes)
    # Load only the attack's source class; apply the backdoor trigger when
    # the configured attack type mentions "backdoor".
    # (idiom fix: substring membership test instead of str.find(...) >= 0)
    is_backdoor = 'backdoor' in options.adversaries['type']
    self.shadow_data = load_data(
        options, [options.adversaries['from']], backdoor=is_backdoor)
    # Relabel every shadow sample to the attack's target class
    # (label-flipping poison applied directly to the dataset's targets).
    self.shadow_data['dataloader'].dataset.targets[:] = \
        options.adversaries['to']
    controller.add_sybil(self)
    # Attack starts disabled; presumably toggled externally — TODO confirm.
    self.attacking = False
def __init__(self, options, classes):
    """Set up a poisoning participant with optional on/off attack scheduling.

    Args:
        options: run configuration; ``options.adversaries`` describes the
            attack — source label ``'from'``, target label ``'to'``, the
            attack ``'type'`` string, and optional ``'toggle_times'``.
        classes: label subset forwarded to the parent constructor.
    """
    super().__init__(options, classes)
    # Load only the attack's source class; apply the backdoor trigger when
    # the configured attack type mentions "backdoor".
    # (idiom fix: substring membership test instead of str.find(...) >= 0)
    is_backdoor = 'backdoor' in options.adversaries['type']
    self.shadow_data = load_data(
        options, [options.adversaries['from']], backdoor=is_backdoor)
    # Relabel every shadow sample to the attack's target class.
    self.shadow_data['dataloader'].dataset.targets[:] = \
        options.adversaries['to']
    # Consistency fix: read from the local `options` parameter like the rest
    # of this constructor (the original mixed `options` and `self.options`;
    # `self.options` is presumably assigned by the parent — this form works
    # either way).
    if (tt := options.adversaries['toggle_times']):
        # Endless schedule of attack on/off durations.
        self.toggle_time = cycle(tt)
# Training-script setup: model/util wrappers and TensorFlow.
from models.wrapper_T import * import tensorflow as tf

tfk = tf.keras

# Hyper Parameters
n_classes = 3
batch_size = 64
n_epochs = 50

# dataset
print('[Dataset]')
with time_counter():
    # NOTE(review): load_data / time_counter / train_test_split come from the
    # star import above (presumably models.wrapper_T) — verify.
    x, y, _ = load_data(
        'data/dataset/',
        classes=['normal', 'nude', 'swimwear'],
        size=128,
        cache_path='data/data_cache/dataset_size128_autopad.pkl',
        auto_pad_val=True)
    x_train, x_test, y_train, y_test = train_test_split(x, y, train_rate=0.5)
n_train = len(x_train)

# Cutout augmenter with fixed parameter 42 — semantics defined by
# gen_random_cutout (presumably the cutout size); TODO confirm.
_random_cutout = gen_random_cutout(42)


@tf.function
def augment(image, label):
    """Apply random augmentations to one (image, label) pair."""
    image, label = random_flip_left_right(image, label)
    # image, label = random_flip_up_down(image, label)
    image, label = _random_cutout(image, label)
    image, label = random_rotate_90(image, label)
    return image, label
# Determine the next trial index from existing results/<name>/trial_<k>
# directory names (`dirs` is presumably listed earlier in the file).
trials = [int(el.split('_')[1]) for el in dirs]
# max(..., default=-1) covers the first-ever trial (no existing dirs)
# without the sentinel trials.insert(0, -1) + np.max the original used;
# behavior is identical (first trial index is 0).
trial = max(trials, default=-1) + 1
os.mkdir('results/{}/trial_{}'.format(hyperparam_name, trial))

with tf.Graph().as_default():
    model = models.gan.GAN(**hyperparameters)
    # NOTE(review): one model instance is trained across all exploits in
    # sequence (no re-initialization per exploit) — confirm intended.
    for exploit in exploits:
        data = []
        # Train on all five subsets first, then evaluate on all five; the
        # model accumulates training across subsets before testing.
        for i in range(5):
            trX, trY = ds.load_data(
                (
                    './data/three-step/{}/subset_{}/train_set.csv'
                ).format(exploit, i)
            )
            model.train(trX, trY)
        for i in range(5):
            teX, teY = ds.load_data(
                (
                    './data/three-step/{}/subset_{}/test_set.csv'
                ).format(exploit, i)
            )
            d = model.test(teX, teY)
            # Per-exploit results (`data`) are presumably consumed after
            # this chunk; `raw_data` collects every result across exploits.
            data.append(d)
            raw_data.append(d)
# tensorflow messageの抑制 import os os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' tfk = tf.keras # tfk.backend.set_floatx('float32') # Params batch_size = 64 n_epochs = 10 lr = 0.001 # Dataset print('[Dataset]') with time_counter(): x, y, _ = load_data('data/dataset/', classes=['normal', 'nude', 'swimwear'], size=256, cache_path='data/data_cache/dataset_size256_autopad.pkl') x_train, x_test, y_train, y_test = train_test_split(x, y, train_rate=0.5) n_train = len(x_train) train_ds = tf.data.Dataset.from_tensor_slices((x_train, y_train)).shuffle(n_train).batch(batch_size) test_ds = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(batch_size) print('x_train: {}'.format(x_train.shape)) print('y_train: {}'.format(y_train.shape)) print('x_test: {}'.format(x_test.shape)) print('y_test: {}'.format(y_test.shape)) in_shape = x_train.shape[1:] del x_train, y_train, x_test, y_test # Model # model = SimpleCNN(in_shape, n_out=3) # model.load_weights('simplecnn/best_param')
import models.wrapper_T

# NOTE(review): `tf` is used below but not imported in this chunk —
# presumably imported earlier in the file; verify.
tfk = tf.keras
tfk.backend.set_floatx('float32')

# Params
# NOTE(review): n_classes is 2 but three class names are passed to
# load_data below — confirm which is intended.
n_classes = 2
batch_size = 64
n_epochs = 100
# lr = 0.001

# Dataset
print('[Dataset]')
with time_counter():
    x, y, _ = load_data('data/sample/',
                        classes=['normal', 'nude', 'swimwear'],
                        size=128,
                        auto_pad_val=True)
ds = tf.data.Dataset.from_tensor_slices((x, y)).batch(batch_size)
# Prints the third value returned by load_data (left unnamed here).
print(_)
print('x: {}'.format(x.shape))
print('y: {}'.format(y.shape))
# Per-sample shape (everything after the batch axis).
in_shape = x.shape[1:]
# Free the in-memory arrays; the tf.data pipeline above holds the data now.
del x, y

# NOTE(review): the import above is models.wrapper_T, but models.wrapper is
# referenced here — this works only if the `models` package itself exposes
# `wrapper`; verify against the package's __init__.
model = models.wrapper.DenseNet121(weights=None,
                                   classes=n_classes,
                                   input_shape=in_shape)
model_name = 'densenet121'
def __init__(self, options, classes):
    """Load the training split and build the network from configuration.

    Args:
        options: run configuration carrying ``model_params``.
        classes: class subset forwarded to ``load_data``.
    """
    self.options = options
    self.data = load_data(options, train=True, classes=classes)
    self.net = load_model(options.model_params)
    # Tail of the exploit-name list (the list literal opens before this
    # chunk).
    'orzhttpd_rootdir', 'orzhttpd_restore'
]

summaries = {'hyperparameters': hyperparameters}
raw_data = []
# Per-exploit result accumulator; presumably filled later in the file —
# TODO confirm.
net_data = {exploit: [] for exploit in exploits}

model = models.gan.GAN(**hyperparameters)
# NOTE(review): one model instance is trained across all exploits in
# sequence (no re-initialization per exploit) — confirm intended.
for exploit in exploits:
    data = []
    # Train on all five subsets first, then evaluate on all five; the
    # model accumulates training across subsets before testing.
    for i in range(5):
        trX, trY = ds.load_data(
            (
                './data/ndss/ts/{}/subset_{}/train_set.csv'
            ).format(exploit, i)
        )
        model.train(trX, trY)
    for j in range(5):
        teX, teY = ds.load_data(
            (
                './data/ndss/ts/{}/subset_{}/test_set.csv'
            ).format(exploit, j)
        )
        d = model.test(teX, teY)
        # Per-exploit results (`data`) are presumably consumed after this
        # chunk; `raw_data` collects every result across exploits.
        data.append(d)
        raw_data.append(d)
# NOTE(review): these are pre-Keras-2.x import paths
# (keras.layers.convolutional etc.) — fine for the pinned version this
# project presumably uses; verify before upgrading Keras.
from keras.layers.convolutional import Conv2D
from keras.layers.pooling import MaxPooling2D, GlobalMaxPooling2D
from keras.models import Model
from keras.layers import Input
from keras.layers.core import Dropout, Flatten, Dense
from keras.optimizers import Adam

from utils import datasets

# Image side length fed to the network, and training epoch count.
img_size = 128
nb_epochs = 15

#############################################################
# Load data
#############################################################
# datasets.load_data() returns train/val images, segmentation masks, class
# labels and bounding boxes — exact shapes defined by utils.datasets;
# TODO confirm.
X_train, Y_train_masks, Y_train_classes, X_val, Y_val_masks, Y_val_classes, Y_train_bbox, Y_val_bbox = datasets.load_data(
)
print(X_train.shape)
print(Y_train_classes.shape)
print(X_val.shape)
print(Y_val_classes.shape)

#############################################################
# Data generators
#############################################################
# NOTE(review): ImageDataGenerator is used but not imported in this chunk —
# presumably `from keras.preprocessing.image import ImageDataGenerator`
# appears elsewhere; verify.
# Rescale pixels to [0, 1] and apply light geometric augmentation.
# (This call continues beyond the visible chunk.)
train_datagen = ImageDataGenerator(rescale=1 / 255,
                                   zoom_range=0.25,
                                   width_shift_range=0.1,
                                   height_shift_range=0.1,
                                   horizontal_flip=True,