def load_decoder(self):
    """
    Load the decoder.
    """

    assert self.args.decoder_files
    decoder_files = self.args.decoder_files.split(',')
    for decoder_file in decoder_files:
        assert os.path.exists(decoder_file), 'could not find %s' % decoder_file

    log('[Training] using %d input channels' % self.train_images.shape[3])
    decoder_units = list(map(int, self.args.decoder_units.split(',')))

    if len(decoder_files) > 1:
        log('[Training] loading multiple decoders')
        decoders = []
        for i in range(len(decoder_files)):
            decoder = models.LearnedDecoder(self.args.latent_space_size,
                                            resolution=(self.train_images.shape[3], self.train_images.shape[1], self.train_images.shape[2]),
                                            architecture=self.args.decoder_architecture,
                                            start_channels=self.args.decoder_channels,
                                            activation=self.args.decoder_activation,
                                            batch_normalization=not self.args.decoder_no_batch_normalization,
                                            units=decoder_units)

            state = State.load(decoder_files[i])
            decoder.load_state_dict(state.model)
            if self.args.use_gpu and not cuda.is_cuda(decoder):
                decoder = decoder.cuda()
            decoders.append(decoder)

            decoder.eval()
            log('[Training] loaded %s' % decoder_files[i])
        self.decoder = models.SelectiveDecoder(decoders, resolution=(self.train_images.shape[3], self.train_images.shape[1], self.train_images.shape[2]))
    else:
        log('[Training] loading one decoder')
        decoder = models.LearnedDecoder(self.args.latent_space_size,
                                        resolution=(self.train_images.shape[3], self.train_images.shape[1], self.train_images.shape[2]),
                                        architecture=self.args.decoder_architecture,
                                        start_channels=self.args.decoder_channels,
                                        activation=self.args.decoder_activation,
                                        batch_normalization=not self.args.decoder_no_batch_normalization,
                                        units=decoder_units)

        state = State.load(decoder_files[0])
        decoder.load_state_dict(state.model)
        if self.args.use_gpu and not cuda.is_cuda(decoder):
            decoder = decoder.cuda()
        decoder.eval()
        log('[Training] read decoder')

        self.decoder = decoder

    self.decoder_classifier = models.DecoderClassifier(self.decoder, self.model)
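# The decoder-loading block above is repeated almost verbatim in the other methods in this
# section. The following is only a hedged sketch of how that shared logic could be factored
# into a module-level helper; the helper name load_decoder_models and its signature are
# assumptions, while models.LearnedDecoder, models.SelectiveDecoder, State.load, cuda.is_cuda
# and log are used exactly as they appear in the original methods.
def load_decoder_models(args, resolution, log_prefix='[Training]'):
    """
    Hypothetical helper: load one or several LearnedDecoders from args.decoder_files and
    wrap multiple decoders in a SelectiveDecoder, mirroring load_decoder above.
    """
    decoder_files = args.decoder_files.split(',')
    for decoder_file in decoder_files:
        assert os.path.exists(decoder_file), 'could not find %s' % decoder_file

    decoder_units = list(map(int, args.decoder_units.split(',')))

    decoders = []
    for decoder_file in decoder_files:
        decoder = models.LearnedDecoder(args.latent_space_size, resolution=resolution,
                                        architecture=args.decoder_architecture,
                                        start_channels=args.decoder_channels,
                                        activation=args.decoder_activation,
                                        batch_normalization=not args.decoder_no_batch_normalization,
                                        units=decoder_units)
        state = State.load(decoder_file)
        decoder.load_state_dict(state.model)
        if args.use_gpu and not cuda.is_cuda(decoder):
            decoder = decoder.cuda()
        decoder.eval()
        decoders.append(decoder)
        log('%s loaded %s' % (log_prefix, decoder_file))

    # A single file yields the decoder itself; several files yield a SelectiveDecoder.
    if len(decoders) > 1:
        return models.SelectiveDecoder(decoders, resolution=resolution)
    return decoders[0]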
def load_data_and_model(self):
    """
    Load data and model.
    """

    self.test_images = utils.read_hdf5(self.args.test_images_file).astype(numpy.float32)
    if len(self.test_images.shape) < 4:
        self.test_images = numpy.expand_dims(self.test_images, axis=3)
    resolution = (self.test_images.shape[3], self.test_images.shape[1], self.test_images.shape[2])
    log('[Visualization] read %s' % self.args.test_images_file)

    self.perturbations = utils.read_hdf5(self.args.perturbations_file).astype(numpy.float32)
    self.perturbations = numpy.swapaxes(self.perturbations, 0, 1)
    log('[Visualization] read %s' % self.args.perturbations_file)

    self.success = utils.read_hdf5(self.args.success_file)
    self.success = numpy.swapaxes(self.success, 0, 1)
    log('[Visualization] read %s' % self.args.success_file)

    self.accuracy = utils.read_hdf5(self.args.accuracy_file)
    log('[Visualization] read %s' % self.args.accuracy_file)

    self.test_theta = utils.read_hdf5(self.args.test_theta_file).astype(numpy.float32)
    self.test_theta = self.test_theta[:self.perturbations.shape[0]]
    log('[Visualization] read %s' % self.args.test_theta_file)

    self.test_codes = utils.read_hdf5(self.args.test_codes_file).astype(numpy.int)
    self.test_codes = self.test_codes[:, self.args.label_index]
    self.N_class = numpy.max(self.test_codes) + 1
    self.test_codes = self.test_codes[:self.perturbations.shape[0]]
    log('[Visualization] read %s' % self.args.test_codes_file)

    network_units = list(map(int, self.args.network_units.split(',')))
    self.classifier = models.Classifier(self.N_class, resolution=resolution,
                                        architecture=self.args.network_architecture,
                                        activation=self.args.network_activation,
                                        batch_normalization=not self.args.network_no_batch_normalization,
                                        start_channels=self.args.network_channels,
                                        dropout=self.args.network_dropout,
                                        units=network_units)

    assert os.path.exists(self.args.classifier_file), 'state file %s not found' % self.args.classifier_file
    state = State.load(self.args.classifier_file)
    log('[Visualization] read %s' % self.args.classifier_file)

    self.classifier.load_state_dict(state.model)
    if self.args.use_gpu and not cuda.is_cuda(self.classifier):
        log('[Visualization] classifier is not CUDA')
        self.classifier = self.classifier.cuda()
    log('[Visualization] loaded classifier')

    self.classifier.eval()
    log('[Visualization] set classifier to eval')

    assert self.args.decoder_files
    decoder_files = self.args.decoder_files.split(',')
    for decoder_file in decoder_files:
        assert os.path.exists(decoder_file), 'could not find %s' % decoder_file

    log('[Visualization] using %d input channels' % self.test_images.shape[3])
    decoder_units = list(map(int, self.args.decoder_units.split(',')))

    if len(decoder_files) > 1:
        log('[Visualization] loading multiple decoders')
        decoders = []
        for i in range(len(decoder_files)):
            decoder = models.LearnedDecoder(self.args.latent_space_size, resolution=resolution,
                                            architecture=self.args.decoder_architecture,
                                            start_channels=self.args.decoder_channels,
                                            activation=self.args.decoder_activation,
                                            batch_normalization=not self.args.decoder_no_batch_normalization,
                                            units=decoder_units)

            state = State.load(decoder_files[i])
            decoder.load_state_dict(state.model)
            if self.args.use_gpu and not cuda.is_cuda(decoder):
                decoder = decoder.cuda()
            decoders.append(decoder)

            decoder.eval()
            log('[Visualization] loaded %s' % decoder_files[i])
        self.decoder = models.SelectiveDecoder(decoders, resolution=resolution)
    else:
        log('[Visualization] loading one decoder')
        decoder = models.LearnedDecoder(self.args.latent_space_size, resolution=resolution,
                                        architecture=self.args.decoder_architecture,
                                        start_channels=self.args.decoder_channels,
                                        activation=self.args.decoder_activation,
                                        batch_normalization=not self.args.decoder_no_batch_normalization,
                                        units=decoder_units)

        state = State.load(decoder_files[0])
        decoder.load_state_dict(state.model)
        if self.args.use_gpu and not cuda.is_cuda(decoder):
            decoder = decoder.cuda()
        decoder.eval()
        log('[Visualization] read decoder')

        self.decoder = decoder
def load_data(self):
    """
    Load data and model.
    """

    with logw('[Detection] read %s' % self.args.train_images_file):
        self.nearest_neighbor_images = utils.read_hdf5(self.args.train_images_file)
        assert len(self.nearest_neighbor_images.shape) == 3

    with logw('[Detection] read %s' % self.args.test_images_file):
        self.test_images = utils.read_hdf5(self.args.test_images_file)
        if len(self.test_images.shape) < 4:
            self.test_images = numpy.expand_dims(self.test_images, axis=3)

    with logw('[Detection] read %s' % self.args.perturbations_file):
        self.perturbations = utils.read_hdf5(self.args.perturbations_file)
        assert len(self.perturbations.shape) == 4

    with logw('[Detection] read %s' % self.args.success_file):
        self.success = utils.read_hdf5(self.args.success_file)

    with logw('[Detection] read %s' % self.args.accuracy_file):
        self.accuracy = utils.read_hdf5(self.args.accuracy_file)

    self.perturbations = numpy.swapaxes(self.perturbations, 0, 1)
    num_attempts = self.perturbations.shape[1]

    self.test_images = self.test_images[:self.perturbations.shape[0]]
    self.train_images = self.nearest_neighbor_images[:self.perturbations.shape[0]]
    self.accuracy = self.accuracy[:self.perturbations.shape[0]]

    self.perturbations = self.perturbations.reshape((self.perturbations.shape[0]*self.perturbations.shape[1], self.perturbations.shape[2], self.perturbations.shape[3]))
    self.success = numpy.swapaxes(self.success, 0, 1)
    self.success = self.success.reshape((self.success.shape[0]*self.success.shape[1]))

    self.accuracy = numpy.repeat(self.accuracy, num_attempts, axis=0)
    self.test_images = numpy.repeat(self.test_images, num_attempts, axis=0)
    self.train_images = numpy.repeat(self.train_images, num_attempts, axis=0)

    max_samples = self.args.max_samples
    self.success = self.success[:max_samples]
    self.accuracy = self.accuracy[:max_samples]
    self.perturbations = self.perturbations[:max_samples]
    self.test_images = self.test_images[:max_samples]
    self.train_images = self.train_images[:max_samples]

    if self.args.mode == 'true':
        assert self.args.database_file
        assert self.args.test_codes_file
        assert self.args.test_theta_file

        self.test_codes = utils.read_hdf5(self.args.test_codes_file)
        log('[Detection] read %s' % self.args.test_codes_file)

        self.test_theta = utils.read_hdf5(self.args.test_theta_file)
        log('[Detection] read %s' % self.args.test_theta_file)

        self.test_codes = self.test_codes[:self.perturbations.shape[0]]
        self.test_theta = self.test_theta[:self.perturbations.shape[0]]

        self.test_codes = numpy.repeat(self.test_codes, num_attempts, axis=0)
        self.test_theta = numpy.repeat(self.test_theta, num_attempts, axis=0)

        self.test_codes = self.test_codes[:max_samples]
        self.test_theta = self.test_theta[:max_samples]

        database = utils.read_hdf5(self.args.database_file)
        log('[Detection] read %s' % self.args.database_file)

        self.N_font = database.shape[0]
        self.N_class = database.shape[1]
        self.N_theta = self.test_theta.shape[1]

        database = database.reshape((database.shape[0]*database.shape[1], database.shape[2], database.shape[3]))
        database = torch.from_numpy(database)
        if self.args.use_gpu:
            database = database.cuda()
        database = torch.autograd.Variable(database, False)

        self.model = models.AlternativeOneHotDecoder(database, self.N_font, self.N_class, self.N_theta)
        self.model.eval()
        log('[Detection] initialized decoder')
    elif self.args.mode == 'appr':
        assert self.args.decoder_files
        assert self.args.test_codes_file
        assert self.args.test_theta_file

        self.test_codes = utils.read_hdf5(self.args.test_codes_file)
        log('[Detection] read %s' % self.args.test_codes_file)

        self.test_theta = utils.read_hdf5(self.args.test_theta_file)
        log('[Detection] read %s' % self.args.test_theta_file)

        self.test_codes = self.test_codes[:self.perturbations.shape[0]]
        self.test_theta = self.test_theta[:self.perturbations.shape[0]]

        self.test_codes = numpy.repeat(self.test_codes, num_attempts, axis=0)
        self.test_theta = numpy.repeat(self.test_theta, num_attempts, axis=0)

        self.test_codes = self.test_codes[:max_samples]
        self.test_theta = self.test_theta[:max_samples]

        assert self.args.decoder_files
        decoder_files = self.args.decoder_files.split(',')
        for decoder_file in decoder_files:
            assert os.path.exists(decoder_file), 'could not find %s' % decoder_file

        resolution = [1 if len(self.test_images.shape) <= 3 else self.test_images.shape[3], self.test_images.shape[1], self.test_images.shape[2]]
        decoder_units = list(map(int, self.args.decoder_units.split(',')))

        if len(decoder_files) > 1:
            log('[Detection] loading multiple decoders')
            decoders = []
            for i in range(len(decoder_files)):
                decoder = models.LearnedDecoder(self.args.latent_space_size, resolution=resolution,
                                                architecture=self.args.decoder_architecture,
                                                start_channels=self.args.decoder_channels,
                                                activation=self.args.decoder_activation,
                                                batch_normalization=not self.args.decoder_no_batch_normalization,
                                                units=decoder_units)
                state = State.load(decoder_files[i])
                decoder.load_state_dict(state.model)
                if self.args.use_gpu and not cuda.is_cuda(decoder):
                    decoder = decoder.cuda()
                decoders.append(decoder)

                decoder.eval()
                log('[Detection] loaded %s' % decoder_files[i])
            self.model = models.SelectiveDecoder(decoders, resolution=resolution)
        else:
            log('[Detection] loading one decoder')
            decoder = models.LearnedDecoder(self.args.latent_space_size, resolution=resolution,
                                            architecture=self.args.decoder_architecture,
                                            start_channels=self.args.decoder_channels,
                                            activation=self.args.decoder_activation,
                                            batch_normalization=not self.args.decoder_no_batch_normalization,
                                            units=decoder_units)

            state = State.load(decoder_files[0])
            decoder.load_state_dict(state.model)
            if self.args.use_gpu and not cuda.is_cuda(decoder):
                decoder = decoder.cuda()
            decoder.eval()
            log('[Detection] read decoder')

            self.model = decoder
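# A minimal, self-contained sketch (not part of the original code) of the reshaping
# convention used in the loaders above: perturbations come in as (attempts, samples, ...),
# get swapped to (samples, attempts, ...), are flattened so that each attempt becomes its
# own row, and the per-sample arrays are aligned via numpy.repeat. All array names and
# shapes below are illustrative only.
import numpy

attempts, samples, height, width = 2, 3, 4, 4
perturbations = numpy.random.rand(attempts, samples, height, width)
accuracy = numpy.arange(samples)

perturbations = numpy.swapaxes(perturbations, 0, 1)        # (samples, attempts, h, w)
num_attempts = perturbations.shape[1]
perturbations = perturbations.reshape((samples * num_attempts, height, width))
accuracy = numpy.repeat(accuracy, num_attempts, axis=0)    # keep per-sample data aligned per attempt

assert perturbations.shape[0] == accuracy.shape[0] == samples * num_attempts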
def load_model(self):
    """
    Load model.
    """

    assert self.args.decoder_files
    decoder_files = self.args.decoder_files.split(',')
    for decoder_file in decoder_files:
        assert os.path.exists(decoder_file), 'could not find %s' % decoder_file

    decoder_units = list(map(int, self.args.decoder_units.split(',')))
    log('[Attack] using %d input channels' % self.test_images.shape[3])

    if len(decoder_files) > 1:
        log('[Attack] loading multiple decoders')
        decoders = []
        for i in range(len(decoder_files)):
            decoder = models.LearnedDecoder(self.args.latent_space_size,
                                            resolution=(self.test_images.shape[3], self.test_images.shape[1], self.test_images.shape[2]),
                                            architecture=self.args.decoder_architecture,
                                            start_channels=self.args.decoder_channels,
                                            activation=self.args.decoder_activation,
                                            batch_normalization=not self.args.decoder_no_batch_normalization,
                                            units=decoder_units)
            log(decoder)

            state = State.load(decoder_files[i])
            decoder.load_state_dict(state.model)
            if self.args.use_gpu and not cuda.is_cuda(decoder):
                decoder = decoder.cuda()
            decoders.append(decoder)

            decoder.eval()
            log('[Attack] loaded %s' % decoder_files[i])
        decoder = models.SelectiveDecoder(decoders, resolution=(self.test_images.shape[3], self.test_images.shape[1], self.test_images.shape[2]))
    else:
        log('[Attack] loading one decoder')
        decoder = models.LearnedDecoder(self.args.latent_space_size,
                                        resolution=(self.test_images.shape[3], self.test_images.shape[1], self.test_images.shape[2]),
                                        architecture=self.args.decoder_architecture,
                                        start_channels=self.args.decoder_channels,
                                        activation=self.args.decoder_activation,
                                        batch_normalization=not self.args.decoder_no_batch_normalization,
                                        units=decoder_units)

        state = State.load(decoder_files[0])
        decoder.load_state_dict(state.model)
        if self.args.use_gpu and not cuda.is_cuda(decoder):
            decoder = decoder.cuda()
        decoder.eval()
        log('[Attack] read decoder')

    classifier_units = list(map(int, self.args.network_units.split(',')))
    classifier = models.Classifier(self.N_class,
                                   resolution=(self.test_images.shape[3], self.test_images.shape[1], self.test_images.shape[2]),
                                   architecture=self.args.network_architecture,
                                   activation=self.args.network_activation,
                                   batch_normalization=not self.args.network_no_batch_normalization,
                                   start_channels=self.args.network_channels,
                                   dropout=self.args.network_dropout,
                                   units=classifier_units)

    assert os.path.exists(self.args.classifier_file), 'state file %s not found' % self.args.classifier_file
    state = State.load(self.args.classifier_file)
    log('[Attack] read %s' % self.args.classifier_file)

    classifier.load_state_dict(state.model)
    if self.args.use_gpu and not cuda.is_cuda(classifier):
        log('[Attack] classifier is not CUDA')
        classifier = classifier.cuda()
    log('[Attack] loaded classifier')

    # !
    classifier.eval()
    log('[Attack] set classifier to eval')

    self.model = models.DecoderClassifier(decoder, classifier)
def load_data(self):
    """
    Load data and model.
    """

    self.test_images = utils.read_hdf5(self.args.test_images_file).astype(numpy.float32)
    log('[Testing] read %s' % self.args.test_images_file)

    # For handling both color and gray images.
    if len(self.test_images.shape) < 4:
        self.test_images = numpy.expand_dims(self.test_images, axis=3)
        log('[Testing] no color images, adjusted size')
    self.resolution = self.test_images.shape[2]
    log('[Testing] resolution %d' % self.resolution)

    self.train_images = utils.read_hdf5(self.args.train_images_file).astype(numpy.float32)
    # !
    self.train_images = self.train_images.reshape((self.train_images.shape[0], -1))
    log('[Testing] read %s' % self.args.train_images_file)

    self.test_theta = utils.read_hdf5(self.args.test_theta_file).astype(numpy.float32)
    log('[Testing] read %s' % self.args.test_theta_file)

    self.train_theta = utils.read_hdf5(self.args.train_theta_file).astype(numpy.float32)
    log('[Testing] read %s' % self.args.train_theta_file)

    self.test_codes = utils.read_hdf5(self.args.test_codes_file).astype(numpy.int)
    self.test_codes = self.test_codes[:, self.args.label_index]
    self.N_class = numpy.max(self.test_codes) + 1
    log('[Testing] read %s' % self.args.test_codes_file)

    self.accuracy = utils.read_hdf5(self.args.accuracy_file)
    log('[Testing] read %s' % self.args.accuracy_file)

    self.perturbations = utils.read_hdf5(self.args.perturbations_file).astype(numpy.float32)
    self.N_attempts = self.perturbations.shape[0]
    assert not numpy.any(self.perturbations != self.perturbations), 'NaN in perturbations'

    # First, repeat relevant data.
    self.perturbation_theta = numpy.repeat(self.test_theta[:self.perturbations.shape[1]], self.N_attempts, axis=0)
    self.perturbation_codes = numpy.repeat(self.test_codes[:self.perturbations.shape[1]], self.N_attempts, axis=0)
    self.perturbation_codes = numpy.squeeze(self.perturbation_codes)
    self.accuracy = numpy.repeat(self.accuracy[:self.perturbations.shape[1]], self.N_attempts, axis=0)

    # Then, reshape the perturbations!
    self.perturbations = numpy.swapaxes(self.perturbations, 0, 1)
    self.perturbations = self.perturbations.reshape((self.perturbations.shape[0] * self.perturbations.shape[1], -1))
    log('[Testing] read %s' % self.args.perturbations_file)

    self.success = utils.read_hdf5(self.args.success_file)
    self.success = numpy.swapaxes(self.success, 0, 1)
    self.success = self.success.reshape((self.success.shape[0] * self.success.shape[1]))
    log('[Testing] read %s' % self.args.success_file)

    assert self.args.decoder_files
    decoder_files = self.args.decoder_files.split(',')
    for decoder_file in decoder_files:
        assert os.path.exists(decoder_file), 'could not find %s' % decoder_file

    log('[Testing] using %d input channels' % self.test_images.shape[3])
    decoder_units = list(map(int, self.args.decoder_units.split(',')))

    if len(decoder_files) > 1:
        log('[Testing] loading multiple decoders')
        decoders = []
        for i in range(len(decoder_files)):
            decoder = models.LearnedDecoder(self.args.latent_space_size,
                                            resolution=(self.test_images.shape[3], self.test_images.shape[1], self.test_images.shape[2]),
                                            architecture=self.args.decoder_architecture,
                                            start_channels=self.args.decoder_channels,
                                            activation=self.args.decoder_activation,
                                            batch_normalization=not self.args.decoder_no_batch_normalization,
                                            units=decoder_units)
            state = State.load(decoder_files[i])
            decoder.load_state_dict(state.model)
            if self.args.use_gpu and not cuda.is_cuda(decoder):
                decoder = decoder.cuda()
            decoders.append(decoder)

            decoder.eval()
            log('[Testing] loaded %s' % decoder_files[i])
        self.model = models.SelectiveDecoder(decoders, resolution=(self.test_images.shape[3], self.test_images.shape[1], self.test_images.shape[2]))
    else:
        log('[Testing] loading one decoder')
        decoder = models.LearnedDecoder(self.args.latent_space_size,
                                        resolution=(self.test_images.shape[3], self.test_images.shape[1], self.test_images.shape[2]),
                                        architecture=self.args.decoder_architecture,
                                        start_channels=self.args.decoder_channels,
                                        activation=self.args.decoder_activation,
                                        batch_normalization=not self.args.decoder_no_batch_normalization,
                                        units=decoder_units)

        state = State.load(decoder_files[0])
        decoder.load_state_dict(state.model)
        if self.args.use_gpu and not cuda.is_cuda(decoder):
            decoder = decoder.cuda()
        decoder.eval()
        log('[Testing] read decoder')

        self.model = decoder
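# Hedged note on the resolution convention used throughout these loaders: images come out of
# utils.read_hdf5 in (N, H, W) or (N, H, W, C) layout, grayscale data receives a trailing
# channel axis via numpy.expand_dims, and the models are given resolution as a (C, H, W)
# tuple. The snippet below only restates that convention with illustrative array shapes;
# it is not part of the original code.
import numpy

test_images = numpy.zeros((10, 28, 28), dtype=numpy.float32)   # grayscale batch (N, H, W)
if len(test_images.shape) < 4:
    test_images = numpy.expand_dims(test_images, axis=3)        # -> (N, H, W, 1)

resolution = (test_images.shape[3], test_images.shape[1], test_images.shape[2])
assert resolution == (1, 28, 28)                                 # (C, H, W) as passed to the models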