# Assumed imports for the methods in this section (the enclosing module header
# is not shown; the repository-local helper paths are best guesses):
import os
import math
import numpy
import torch
import sklearn.metrics

import models
import common.torch
import common.numpy
from common import utils, plot
from common.log import log


def train(self, epoch):
    """
    Train for one epoch.

    :param epoch: current epoch
    :type epoch: int
    """

    self.encoder.train()
    log('[Training] %d set encoder to train' % epoch)
    self.decoder.train()
    log('[Training] %d set decoder to train' % epoch)
    self.classifier.train()
    log('[Training] %d set classifier to train' % epoch)

    num_batches = int(math.ceil(self.train_images.shape[0] / self.args.batch_size))
    assert self.encoder.training is True

    permutation = numpy.random.permutation(self.train_images.shape[0])
    # Pad the permutation so the last batch is full.
    permutation = numpy.concatenate((permutation, permutation[:self.args.batch_size]), axis=0)

    for b in range(num_batches):
        self.encoder_scheduler.update(epoch, float(b) / num_batches)
        self.decoder_scheduler.update(epoch, float(b) / num_batches)
        self.classifier_scheduler.update(epoch, float(b) / num_batches)

        perm = permutation[b * self.args.batch_size:(b + 1) * self.args.batch_size]
        batch_images = common.torch.as_variable(self.train_images[perm], self.args.use_gpu, True)
        batch_images = batch_images.permute(0, 3, 1, 2)

        output_mu, output_logvar = self.encoder(batch_images)
        output_codes = self.reparameterize(output_mu, output_logvar)
        output_images = self.decoder(output_codes)

        output_real_classes = self.classifier(batch_images)
        output_reconstructed_classes = self.classifier(output_images)

        latent_loss = self.latent_loss(output_mu, output_logvar)
        reconstruction_loss = self.reconstruction_loss(batch_images, output_images)
        decoder_loss = self.decoder_loss(output_reconstructed_classes)
        discriminator_loss = self.discriminator_loss(output_real_classes, output_reconstructed_classes)

        # Encoder update.
        self.encoder_scheduler.optimizer.zero_grad()
        loss = latent_loss + self.args.beta * reconstruction_loss + self.args.gamma * decoder_loss \
            + self.args.eta * torch.sum(torch.abs(output_logvar))
        loss.backward(retain_graph=True)
        self.encoder_scheduler.optimizer.step()

        # Decoder update.
        self.decoder_scheduler.optimizer.zero_grad()
        loss = self.args.beta * reconstruction_loss + self.args.gamma * decoder_loss
        loss.backward(retain_graph=True)
        self.decoder_scheduler.optimizer.step()

        # Discriminator (classifier) update.
        self.classifier_scheduler.optimizer.zero_grad()
        loss = self.args.gamma * discriminator_loss
        loss.backward()
        self.classifier_scheduler.optimizer.step()

        reconstruction_error = self.reconstruction_error(batch_images, output_images)

        iteration = epoch * num_batches + b + 1
        self.train_statistics = numpy.vstack((self.train_statistics, numpy.array([
            iteration,
            iteration * self.args.batch_size,
            min(num_batches, iteration),
            min(num_batches, iteration) * self.args.batch_size,
            reconstruction_loss.item(),
            reconstruction_error.item(),
            latent_loss.item(),
            torch.mean(output_mu).item(),
            torch.var(output_mu).item(),
            torch.mean(output_logvar).item(),
            decoder_loss.item(),
            discriminator_loss.item(),
            torch.mean(torch.abs(list(self.encoder.parameters())[0].grad)).item(),
            torch.mean(torch.abs(list(self.decoder.parameters())[0].grad)).item(),
            torch.mean(torch.abs(list(self.classifier.parameters())[0].grad)).item(),
        ])))

        skip = 10
        if b % skip == skip // 2:
            log('[Training] %d | %d: %g (%g) %g (%g, %g, %g)' % (
                epoch, b,
                numpy.mean(self.train_statistics[max(0, iteration - skip):iteration, 4]),
                numpy.mean(self.train_statistics[max(0, iteration - skip):iteration, 5]),
                numpy.mean(self.train_statistics[max(0, iteration - skip):iteration, 6]),
                numpy.mean(self.train_statistics[max(0, iteration - skip):iteration, 7]),
                numpy.mean(self.train_statistics[max(0, iteration - skip):iteration, 8]),
                numpy.mean(self.train_statistics[max(0, iteration - skip):iteration, 9]),
            ))
            log('[Training] %d | %d: %g %g (%g, %g, %g)' % (
                epoch, b,
                numpy.mean(self.train_statistics[max(0, iteration - skip):iteration, 10]),
                numpy.mean(self.train_statistics[max(0, iteration - skip):iteration, 11]),
                numpy.mean(self.train_statistics[max(0, iteration - skip):iteration, 12]),
                numpy.mean(self.train_statistics[max(0, iteration - skip):iteration, 13]),
                numpy.mean(self.train_statistics[max(0, iteration - skip):iteration, 14]),
            ))
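# The training step above relies on self.reparameterize and self.latent_loss,
# which are defined elsewhere in the repository. Below is a minimal sketch of
# the standard VAE formulation they presumably implement (reparameterization
# trick and the analytic KL divergence to a unit Gaussian); the actual
# implementations may differ in reduction or weighting.

def reparameterize(mu, logvar):
    """Sample codes as mu + eps * sigma with eps ~ N(0, I) (sketch)."""
    std = torch.exp(0.5 * logvar)
    eps = torch.randn_like(std)
    return mu + eps * std


def latent_loss(mu, logvar):
    """Closed-form KL(q(z|x) || N(0, I)) for a diagonal Gaussian (sketch)."""
    return -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())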
def compute_statistics(self):
    """
    Compute statistics based on distances.
    """

    num_attempts = self.perturbations.shape[0]

    perturbations = numpy.swapaxes(self.perturbations, 0, 1)
    perturbations = perturbations.reshape((perturbations.shape[0] * perturbations.shape[1], perturbations.shape[2]))
    success = numpy.swapaxes(self.success, 0, 1)
    success = success.reshape((success.shape[0] * success.shape[1]))

    probabilities = numpy.swapaxes(self.probabilities, 0, 1)
    probabilities = probabilities.reshape((probabilities.shape[0] * probabilities.shape[1], -1))
    confidences = numpy.max(probabilities, 1)

    perturbation_probabilities = self.test_probabilities[:self.success.shape[1]]
    perturbation_probabilities = numpy.repeat(perturbation_probabilities, num_attempts, axis=0)
    perturbation_confidences = numpy.max(perturbation_probabilities, 1)

    probability_ratios = confidences / perturbation_confidences

    raw_overall_success = success >= 0
    log('[Testing] %d valid attacks' % numpy.sum(raw_overall_success))

    # For off-manifold attacks this should not happen, but better safe than sorry.
    if not numpy.any(raw_overall_success):
        for key in ['raw_success', 'raw_iteration', 'raw_roc', 'raw_confidence_weighted_success', 'raw_confidence', 'raw_ratios']:
            self.results[key] = 0
        if self.args.results_file:
            utils.write_pickle(self.args.results_file, self.results)
            log('[Testing] wrote %s' % self.args.results_file)
        log('[Testing] no successful attacks found, no plots')
        return

    #
    # We compute some simple statistics:
    # - raw success rate: fraction of successful attacks without considering epsilon
    # - corrected success rate: fraction of successful attacks within the epsilon-ball
    # - raw average perturbation: average distance to original samples (for successful attacks)
    # - corrected average perturbation: average distance to original samples for perturbations
    #   within the epsilon-ball (for successful attacks).
    # These statistics can also be computed per class.
    # And these statistics are computed with respect to three norms.

    if self.args.plot_directory and utils.display():
        iterations = success[raw_overall_success]
        x = numpy.arange(numpy.max(iterations) + 1)
        y = numpy.bincount(iterations)
        plot_file = os.path.join(self.args.plot_directory, 'iterations')
        plot.bar(plot_file, x, y, title='Distribution of Iterations of Successful Attacks', xlabel='Number of Iterations', ylabel='Count')
        log('[Testing] wrote %s' % plot_file)

        plot_file = os.path.join(self.args.plot_directory, 'probabilities')
        plot.histogram(plot_file, confidences[raw_overall_success], 50)
        log('[Testing] wrote %s' % plot_file)

        plot_file = os.path.join(self.args.plot_directory, 'probability_ratios')
        plot.histogram(plot_file, probability_ratios, 50)
        log('[Testing] wrote %s' % plot_file)

        plot_file = os.path.join(self.args.plot_directory, 'test_probabilities')
        plot.histogram(plot_file, self.test_probabilities[numpy.arange(self.test_probabilities.shape[0]), self.test_codes], 50)
        log('[Testing] wrote %s' % plot_file)

    y_true = numpy.concatenate((numpy.zeros(confidences.shape[0]), numpy.ones(perturbation_confidences.shape[0])))
    y_score = numpy.concatenate((confidences, perturbation_confidences))
    roc_auc_score = sklearn.metrics.roc_auc_score(y_true, y_score)

    self.results['raw_roc'] = roc_auc_score
    self.results['raw_confidence_weighted_success'] = numpy.sum(confidences[raw_overall_success]) / numpy.sum(perturbation_confidences)
    self.results['raw_confidence'] = numpy.mean(confidences[raw_overall_success])
    self.results['raw_ratios'] = numpy.mean(probability_ratios[raw_overall_success])
    self.results['raw_success'] = numpy.sum(raw_overall_success) / success.shape[0]
    self.results['raw_iteration'] = numpy.average(success[raw_overall_success])

    if self.args.results_file:
        utils.write_pickle(self.args.results_file, self.results)
        log('[Testing] wrote %s' % self.args.results_file)
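# Throughout these routines, `success` encodes both outcome and cost: entry i
# holds the iteration at which attack i first succeeded, and a negative value
# marks a failed attempt. A tiny self-contained illustration of how the raw
# statistics above fall out of that encoding (values are made up):
#
#     import numpy
#
#     success = numpy.array([3, -1, 7, -1, 12])  # two failures, three successes
#     raw_overall_success = success >= 0
#     raw_success = numpy.sum(raw_overall_success) / success.shape[0]  # 0.6
#     raw_iteration = numpy.average(success[raw_overall_success])     # (3+7+12)/3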
def train(self): """ Train adversarially. """ num_batches = int( math.ceil(self.train_images.shape[0] / self.args.batch_size)) permutation = numpy.random.permutation(self.train_images.shape[0]) perturbation_permutation = numpy.random.permutation( self.train_images.shape[0]) if self.args.safe: perturbation_permutation = perturbation_permutation[ self.train_valid == 1] else: perturbation_permuation = permutation for b in range(num_batches): self.scheduler.update(self.epoch, float(b) / num_batches) self.model.eval() assert self.model.training is False objective = self.objective_class() split = self.args.batch_size // 2 if self.args.full_variant: perm = numpy.concatenate( (numpy.take(permutation, range(b * self.args.batch_size, b * self.args.batch_size + split), mode='wrap'), numpy.take(perturbation_permutation, range(b * self.args.batch_size + split, (b + 1) * self.args.batch_size), mode='wrap')), axis=0) batch_images = common.torch.as_variable( self.train_images[perm], self.args.use_gpu) batch_classes = common.torch.as_variable( self.train_codes[perm], self.args.use_gpu) batch_theta = common.torch.as_variable(self.train_theta[perm], self.args.use_gpu) batch_images = batch_images.permute(0, 3, 1, 2) attack = self.setup_attack(self.model, batch_images[:split], batch_classes[:split]) success, perturbations, _, _, _ = attack.run( objective, self.args.verbose) batch_perturbations1 = common.torch.as_variable( perturbations.astype(numpy.float32), self.args.use_gpu) batch_perturbed_images1 = batch_images[:split] + batch_perturbations1 if isinstance(self.decoder, models.SelectiveDecoder): self.decoder.set_code(batch_classes[split:]) attack = self.setup_decoder_attack(self.decoder_classifier, batch_theta[split:], batch_classes[split:]) attack.set_bound(torch.from_numpy(self.min_bound), torch.from_numpy(self.max_bound)) decoder_success, decoder_perturbations, probabilities, norm, _ = attack.run( objective, self.args.verbose) batch_perturbed_theta = batch_theta[ split:] + common.torch.as_variable(decoder_perturbations, self.args.use_gpu) batch_perturbed_images2 = self.decoder(batch_perturbed_theta) batch_perturbations2 = batch_perturbed_images2 - batch_images[ split:] batch_input_images = torch.cat( (batch_perturbed_images1, batch_perturbed_images2), dim=0) self.model.train() assert self.model.training is True output_classes = self.model(batch_input_images) self.scheduler.optimizer.zero_grad() perturbation_loss = self.loss(batch_classes[:split], output_classes[:split]) decoder_perturbation_loss = self.loss(batch_classes[split:], output_classes[split:]) loss = (perturbation_loss + decoder_perturbation_loss) / 2 loss.backward() self.scheduler.optimizer.step() loss = loss.item() perturbation_loss = perturbation_loss.item() decoder_perturbation_loss = decoder_perturbation_loss.item() gradient = torch.mean( torch.abs(list(self.model.parameters())[0].grad)) gradient = gradient.item() perturbation_error = self.error(batch_classes[:split], output_classes[:split]) perturbation_error = perturbation_error.item() decoder_perturbation_error = self.error( batch_classes[split:], output_classes[split:]) decoder_perturbation_error = decoder_perturbation_error.item() error = (perturbation_error + decoder_perturbation_error) / 2 else: perm = numpy.concatenate(( numpy.take( perturbation_permutation, range(b * self.args.batch_size + split + split // 2, (b + 1) * self.args.batch_size), mode='wrap'), numpy.take( permutation, range(b * self.args.batch_size, b * self.args.batch_size + split + split // 2), mode='wrap'), ), 
axis=0) batch_images = common.torch.as_variable( self.train_images[perm], self.args.use_gpu) batch_classes = common.torch.as_variable( self.train_codes[perm], self.args.use_gpu) batch_theta = common.torch.as_variable(self.train_theta[perm], self.args.use_gpu) batch_images = batch_images.permute(0, 3, 1, 2) attack = self.setup_attack(self.model, batch_images[split // 2:split], batch_classes[split // 2:split]) success, perturbations, _, _, _ = attack.run( objective, self.args.verbose) batch_perturbations1 = common.torch.as_variable( perturbations.astype(numpy.float32), self.args.use_gpu) batch_perturbed_images1 = batch_images[ split // 2:split] + batch_perturbations1 if isinstance(self.decoder, models.SelectiveDecoder): self.decoder.set_code(batch_classes[:split // 2]) attack = self.setup_decoder_attack(self.decoder_classifier, batch_theta[:split // 2], batch_classes[:split // 2]) attack.set_bound(torch.from_numpy(self.min_bound), torch.from_numpy(self.max_bound)) decoder_success, decoder_perturbations, probabilities, norm, _ = attack.run( objective, self.args.verbose) batch_perturbed_theta = batch_theta[:split // 2] + common.torch.as_variable( decoder_perturbations, self.args.use_gpu) batch_perturbed_images2 = self.decoder(batch_perturbed_theta) batch_perturbations2 = batch_perturbed_images2 - batch_images[:split // 2] batch_input_images = torch.cat( (batch_perturbed_images2, batch_perturbed_images1, batch_images[split:]), dim=0) self.model.train() assert self.model.training is True output_classes = self.model(batch_input_images) self.scheduler.optimizer.zero_grad() loss = self.loss(batch_classes[split:], output_classes[split:]) perturbation_loss = self.loss(batch_classes[split // 2:split], output_classes[split // 2:split]) decoder_perturbation_loss = self.loss( batch_classes[:split // 2], output_classes[:split // 2]) l = (loss + perturbation_loss + decoder_perturbation_loss) / 3 l.backward() self.scheduler.optimizer.step() loss = loss.item() perturbation_loss = perturbation_loss.item() decoder_perturbation_loss = decoder_perturbation_loss.item() gradient = torch.mean( torch.abs(list(self.model.parameters())[0].grad)) gradient = gradient.item() error = self.error(batch_classes[split:], output_classes[split:]) error = error.item() perturbation_error = self.error( batch_classes[split // 2:split], output_classes[split // 2:split]) perturbation_error = perturbation_error.item() decoder_perturbation_error = self.error( batch_classes[:split // 2], output_classes[:split // 2]) decoder_perturbation_error = decoder_perturbation_error.item() iterations = numpy.mean( success[success >= 0]) if numpy.sum(success >= 0) > 0 else -1 norm = numpy.mean( numpy.linalg.norm(perturbations.reshape( perturbations.shape[0], -1), axis=1, ord=self.norm)) success = numpy.sum(success >= 0) / self.args.batch_size decoder_iterations = numpy.mean( decoder_success[decoder_success >= 0]) if numpy.sum( decoder_success >= 0) > 0 else -1 decoder_norm = numpy.mean( numpy.linalg.norm(decoder_perturbations, axis=1, ord=self.norm)) decoder_success = numpy.sum( decoder_success >= 0) / self.args.batch_size iteration = self.epoch * num_batches + b + 1 self.train_statistics = numpy.vstack(( self.train_statistics, numpy.array([[ iteration, # iterations iteration * (1 + self.args.max_iterations) * self.args.batch_size, # samples seen min(num_batches, iteration) * self.args.batch_size + iteration * self.args.max_iterations * self.args.batch_size, # unique samples seen loss, error, perturbation_loss, perturbation_error, 
decoder_perturbation_loss, decoder_perturbation_error, success, iterations, norm, decoder_success, decoder_iterations, decoder_norm, gradient ]]))) if b % self.args.skip == self.args.skip // 2: log('[Training] %d | %d: %g (%g) %g (%g) %g (%g) [%g]' % ( self.epoch, b, numpy.mean(self.train_statistics[ max(0, iteration - self.args.skip):iteration, 3]), numpy.mean(self.train_statistics[ max(0, iteration - self.args.skip):iteration, 4]), numpy.mean(self.train_statistics[ max(0, iteration - self.args.skip):iteration, 5]), numpy.mean(self.train_statistics[ max(0, iteration - self.args.skip):iteration, 6]), numpy.mean(self.train_statistics[ max(0, iteration - self.args.skip):iteration, 7]), numpy.mean(self.train_statistics[ max(0, iteration - self.args.skip):iteration, 8]), numpy.mean(self.train_statistics[ max(0, iteration - self.args.skip):iteration, -1]), )) log('[Training] %d | %d: %g (%g, %g) %g (%g, %g)' % ( self.epoch, b, numpy.mean(self.train_statistics[ max(0, iteration - self.args.skip):iteration, 9]), numpy.mean(self.train_statistics[ max(0, iteration - self.args.skip):iteration, 10]), numpy.mean(self.train_statistics[ max(0, iteration - self.args.skip):iteration, 11]), numpy.mean(self.train_statistics[ max(0, iteration - self.args.skip):iteration, 12]), numpy.mean(self.train_statistics[ max(0, iteration - self.args.skip):iteration, 13]), numpy.mean(self.train_statistics[ max(0, iteration - self.args.skip):iteration, 14]), )) self.debug('clean.%d.png' % self.epoch, batch_images.permute(0, 2, 3, 1)) self.debug('perturbed.%d.png' % self.epoch, batch_perturbed_images1.permute(0, 2, 3, 1)) self.debug('perturbed2.%d.png' % self.epoch, batch_perturbed_images2.permute(0, 2, 3, 1)) self.debug('perturbation.%d.png' % self.epoch, batch_perturbations1.permute(0, 2, 3, 1), cmap='seismic') self.debug('perturbation2.%d.png' % self.epoch, batch_perturbations2.permute(0, 2, 3, 1), cmap='seismic')
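# Column layout of self.train_statistics as assembled above; the magic indices
# in the log calls refer to these columns (self.test_statistics in test() below
# mirrors this layout without the trailing gradient column):
#
#    0: iteration             1: samples seen           2: unique samples seen
#    3: loss                  4: error                  5: perturbation_loss
#    6: perturbation_error    7: decoder_perturbation_loss
#    8: decoder_perturbation_error                      9: success
#   10: iterations           11: norm                  12: decoder_success
#   13: decoder_iterations   14: decoder_norm          15: gradient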
def test(self): """ Test the model. """ self.model.eval() assert self.model.training is False log('[Training] %d set classifier to eval' % self.epoch) loss = error = 0 num_batches = int( math.ceil(self.args.test_samples / self.args.batch_size)) for b in range(num_batches): perm = numpy.take(range(self.args.test_samples), range(b * self.args.batch_size, (b + 1) * self.args.batch_size), mode='clip') batch_images = common.torch.as_variable(self.test_images[perm], self.args.use_gpu) batch_classes = common.torch.as_variable(self.test_codes[perm], self.args.use_gpu) batch_images = batch_images.permute(0, 3, 1, 2) output_classes = self.model(batch_images) e = self.loss(batch_classes, output_classes) loss += e.item() a = self.error(batch_classes, output_classes) error += a.item() perturbation_loss = perturbation_error = success = iterations = norm = 0 num_batches = int( math.ceil(self.args.attack_samples / self.args.batch_size)) assert self.args.attack_samples > 0 and self.args.attack_samples <= self.test_images.shape[ 0] for b in range(num_batches): perm = numpy.take(range(self.args.attack_samples), range(b * self.args.batch_size, (b + 1) * self.args.batch_size), mode='clip') batch_images = common.torch.as_variable(self.test_images[perm], self.args.use_gpu) batch_classes = common.torch.as_variable(self.test_codes[perm], self.args.use_gpu) batch_images = batch_images.permute(0, 3, 1, 2) objective = self.objective_class() attack = self.setup_attack(self.model, batch_images, batch_classes) s, p, _, _, _ = attack.run(objective, False) batch_images = batch_images + common.torch.as_variable( p.astype(numpy.float32), self.args.use_gpu) output_classes = self.model(batch_images) e = self.loss(batch_classes, output_classes) perturbation_loss += e.item() e = self.error(batch_classes, output_classes) perturbation_error += e.item() iterations += numpy.mean( s[s >= 0]) if numpy.sum(s >= 0) > 0 else -1 norm += numpy.mean( numpy.linalg.norm(p.reshape(p.shape[0], -1), axis=1, ord=self.norm)) success += numpy.sum(s >= 0) / self.args.batch_size decoder_perturbation_loss = decoder_perturbation_error = decoder_success = decoder_iterations = decoder_norm = 0 num_batches = int( math.ceil(self.args.attack_samples / self.args.batch_size)) assert self.args.attack_samples > 0 and self.args.attack_samples <= self.test_images.shape[ 0] for b in range(num_batches): perm = numpy.take(range(self.args.attack_samples), range(b * self.args.batch_size, (b + 1) * self.args.batch_size), mode='clip') batch_theta = common.torch.as_variable(self.test_theta[perm], self.args.use_gpu) batch_classes = common.torch.as_variable(self.test_codes[perm], self.args.use_gpu) objective = self.objective_class() if isinstance(self.decoder, models.SelectiveDecoder): self.decoder.set_code(batch_classes) attack = self.setup_decoder_attack(self.decoder_classifier, batch_theta, batch_classes) attack.set_bound(torch.from_numpy(self.min_bound), torch.from_numpy(self.max_bound)) s, p, _, _, _ = attack.run(objective, False) perturbations = common.torch.as_variable(p, self.args.use_gpu) batch_perturbed_theta = batch_theta + perturbations batch_perturbed_images = self.decoder(batch_perturbed_theta) output_classes = self.model(batch_perturbed_images) e = self.loss(batch_classes, output_classes) perturbation_loss += e.item() a = self.error(batch_classes, output_classes) perturbation_error += a.item() decoder_iterations += numpy.mean( s[s >= 0]) if numpy.sum(s >= 0) > 0 else -1 decoder_norm += numpy.mean( numpy.linalg.norm(p.reshape(p.shape[0], -1), axis=1, 
ord=self.norm)) decoder_success += numpy.sum(s >= 0) / self.args.batch_size loss /= num_batches error /= num_batches perturbation_loss /= num_batches perturbation_error /= num_batches success /= num_batches iterations /= num_batches norm /= num_batches decoder_perturbation_loss /= num_batches decoder_perturbation_error /= num_batches decoder_success /= num_batches decoder_iterations /= num_batches decoder_norm /= num_batches log('[Training] %d: test %g (%g) %g (%g) %g (%g)' % (self.epoch, loss, error, perturbation_loss, perturbation_error, decoder_perturbation_loss, decoder_perturbation_error)) log('[Training] %d: test %g (%g, %g) %g (%g, %g)' % (self.epoch, success, iterations, norm, decoder_success, decoder_iterations, decoder_norm)) num_batches = int( math.ceil(self.train_images.shape[0] / self.args.batch_size)) iteration = self.epoch * num_batches self.test_statistics = numpy.vstack(( self.test_statistics, numpy.array([[ iteration, # iterations iteration * (1 + self.args.max_iterations) * self.args.batch_size, # samples seen min(num_batches, iteration) * self.args.batch_size + iteration * self.args.max_iterations * self.args.batch_size, # unique samples seen loss, error, perturbation_loss, perturbation_error, decoder_perturbation_loss, decoder_perturbation_error, success, iterations, norm, decoder_success, decoder_iterations, decoder_norm, ]])))
def train(self, epoch):
    """
    Train for one epoch.

    :param epoch: current epoch
    :type epoch: int
    """

    assert self.encoder is not None and self.decoder is not None
    assert self.scheduler is not None

    self.auto_encoder.train()
    log('[Training] %d set auto encoder to train' % epoch)
    self.encoder.train()
    log('[Training] %d set encoder to train' % epoch)
    self.decoder.train()
    log('[Training] %d set decoder to train' % epoch)

    num_batches = int(math.ceil(self.train_images.shape[0] / self.args.batch_size))
    assert self.encoder.training is True

    permutation = numpy.random.permutation(self.train_images.shape[0])
    permutation = numpy.concatenate((permutation, permutation[:self.args.batch_size]), axis=0)

    for b in range(num_batches):
        self.scheduler.update(epoch, float(b) / num_batches)

        perm = permutation[b * self.args.batch_size:(b + 1) * self.args.batch_size]
        batch_images = common.torch.as_variable(self.train_images[perm], self.args.use_gpu)
        batch_images = batch_images.permute(0, 3, 1, 2)

        output_images, output_mu, output_logvar = self.auto_encoder(batch_images)

        reconstruction_loss = self.reconstruction_loss(batch_images, output_images)
        latent_loss = self.latent_loss(output_mu, output_logvar)

        self.scheduler.optimizer.zero_grad()
        loss = self.args.beta * reconstruction_loss + latent_loss
        loss.backward()
        self.scheduler.optimizer.step()

        reconstruction_loss = reconstruction_loss.item()
        latent_loss = latent_loss.item()
        reconstruction_error = self.reconstruction_error(batch_images, output_images)
        reconstruction_error = reconstruction_error.item()

        iteration = epoch * num_batches + b + 1
        self.train_statistics = numpy.vstack((self.train_statistics, numpy.array([
            iteration,
            iteration * self.args.batch_size,
            min(num_batches, iteration),
            min(num_batches, iteration) * self.args.batch_size,
            reconstruction_loss,
            reconstruction_error,
            latent_loss,
            torch.mean(output_mu).item(),
            torch.var(output_mu).item(),
            torch.mean(output_logvar).item(),
        ])))

        skip = 10
        if b % skip == skip // 2:
            log('[Training] %d | %d: %g (%g) %g %g %g %g' % (
                epoch, b,
                numpy.mean(self.train_statistics[max(0, iteration - skip):iteration, 4]),
                numpy.mean(self.train_statistics[max(0, iteration - skip):iteration, 5]),
                numpy.mean(self.train_statistics[max(0, iteration - skip):iteration, 6]),
                numpy.mean(self.train_statistics[max(0, iteration - skip):iteration, 7]),
                numpy.mean(self.train_statistics[max(0, iteration - skip):iteration, 8]),
                numpy.mean(self.train_statistics[max(0, iteration - skip):iteration, 9]),
            ))
def train(self): """ Train with fair data augmentation. """ self.model.train() assert self.model.training is True assert self.decoder.training is False split = self.args.batch_size // 2 num_batches = int(math.ceil(self.train_images.shape[0] / self.args.batch_size)) permutation = numpy.random.permutation(self.train_images.shape[0]) for b in range(num_batches): self.scheduler.update(self.epoch, float(b) / num_batches) perm = numpy.take(permutation, range(b*self.args.batch_size, (b+1)*self.args.batch_size), mode='wrap') batch_images = common.torch.as_variable(self.train_images[perm], self.args.use_gpu) batch_theta = common.torch.as_variable(self.train_theta[perm], self.args.use_gpu) batch_classes = common.torch.as_variable(self.train_codes[perm], self.args.use_gpu) batch_images = batch_images.permute(0, 3, 1, 2) loss = error = gradient = 0 if self.args.full_variant: for t in range(self.args.max_iterations): if self.args.strong_variant: # Here we want to sample form a truncated Gaussian random = common.numpy.truncated_normal(batch_theta.size(), lower=-self.args.bound, upper=self.args.bound) batch_perturbed_theta = common.torch.as_variable(random.astype(numpy.float32), self.args.use_gpu) if isinstance(self.decoder, models.SelectiveDecoder): self.decoder.set_code(batch_classes) batch_perturbed_images = self.decoder(batch_perturbed_theta) else: random = common.numpy.uniform_ball(batch_theta.size(0), batch_theta.size(1), epsilon=self.args.epsilon, ord=self.norm) batch_perturbed_theta = batch_theta + common.torch.as_variable(random.astype(numpy.float32), self.args.use_gpu) batch_perturbed_theta = torch.min(common.torch.as_variable(self.max_bound, self.args.use_gpu), batch_perturbed_theta) batch_perturbed_theta = torch.max(common.torch.as_variable(self.min_bound, self.args.use_gpu), batch_perturbed_theta) if isinstance(self.decoder, models.SelectiveDecoder): self.decoder.set_code(batch_classes) batch_perturbed_images = self.decoder(batch_perturbed_theta) output_classes = self.model(batch_perturbed_images) self.scheduler.optimizer.zero_grad() l = self.loss(batch_classes, output_classes) l.backward() self.scheduler.optimizer.step() loss += l.item() g = torch.mean(torch.abs(list(self.model.parameters())[0].grad)) gradient += g.item() e = self.error(batch_classes, output_classes) error += e.item() batch_perturbations = batch_perturbed_images - batch_images gradient /= self.args.max_iterations loss /= self.args.max_iterations error /= self.args.max_iterations perturbation_loss = loss perturbation_error = error else: output_classes = self.model(batch_images[:split]) self.scheduler.optimizer.zero_grad() l = self.loss(batch_classes[:split], output_classes) l.backward() self.scheduler.optimizer.step() loss = l.item() gradient = torch.mean(torch.abs(list(self.model.parameters())[0].grad)) gradient = gradient.item() e = self.error(batch_classes[:split], output_classes) error = e.item() perturbation_loss = perturbation_error = 0 for t in range(self.args.max_iterations): if self.args.strong_variant: # Here we want to sample form a truncated Gaussian random = common.numpy.truncated_normal([split, batch_theta.size(1)], lower=-self.args.bound, upper=self.args.bound) batch_perturbed_theta = common.torch.as_variable(random.astype(numpy.float32), self.args.use_gpu) if isinstance(self.decoder, models.SelectiveDecoder): self.decoder.set_code(batch_classes[split:]) batch_perturbed_images = self.decoder(batch_perturbed_theta) else: random = common.numpy.uniform_ball(split, batch_theta.size(1), epsilon=self.args.epsilon, 
ord=self.norm) batch_perturbed_theta = batch_theta[split:] + common.torch.as_variable(random.astype(numpy.float32), self.args.use_gpu) batch_perturbed_theta = torch.min(common.torch.as_variable(self.max_bound, self.args.use_gpu), batch_perturbed_theta) batch_perturbed_theta = torch.max(common.torch.as_variable(self.min_bound, self.args.use_gpu), batch_perturbed_theta) if isinstance(self.decoder, models.SelectiveDecoder): self.decoder.set_code(batch_classes[split:]) batch_perturbed_images = self.decoder(batch_perturbed_theta) output_classes = self.model(batch_perturbed_images) self.scheduler.optimizer.zero_grad() l = self.loss(batch_classes[split:], output_classes) l.backward() self.scheduler.optimizer.step() perturbation_loss += l.item() g = torch.mean(torch.abs(list(self.model.parameters())[0].grad)) gradient += g.item() e = self.error(batch_classes[split:], output_classes) perturbation_error += e.item() batch_perturbations = batch_perturbed_images - batch_images[split:] gradient /= self.args.max_iterations + 1 perturbation_loss /= self.args.max_iterations perturbation_error /= self.args.max_iterations iteration = self.epoch * num_batches + b + 1 self.train_statistics = numpy.vstack((self.train_statistics, numpy.array([[ iteration, # iterations iteration * (1 + self.args.max_iterations) * self.args.batch_size, # samples seen min(num_batches, iteration) * self.args.batch_size + iteration * self.args.max_iterations * self.args.batch_size, # unique samples seen loss, error, perturbation_loss, perturbation_error, gradient ]]))) if b % self.args.skip == self.args.skip // 2: log('[Training] %d | %d: %g (%g) %g (%g) [%g]' % ( self.epoch, b, numpy.mean(self.train_statistics[max(0, iteration - self.args.skip):iteration, 3]), numpy.mean(self.train_statistics[max(0, iteration - self.args.skip):iteration, 4]), numpy.mean(self.train_statistics[max(0, iteration - self.args.skip):iteration, 5]), numpy.mean(self.train_statistics[max(0, iteration - self.args.skip):iteration, 6]), numpy.mean(self.train_statistics[max(0, iteration - self.args.skip):iteration, -1]), )) self.debug('clean.%d.png' % self.epoch, batch_images.permute(0, 2, 3, 1)) self.debug('perturbed.%d.png' % self.epoch, batch_perturbed_images.permute(0, 2, 3, 1)) self.debug('perturbation.%d.png' % self.epoch, batch_perturbations.permute(0, 2, 3, 1), cmap='seismic')
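# common.numpy.uniform_ball and common.numpy.truncated_normal are used above
# but defined elsewhere in the repository. Minimal sketches of what they are
# assumed to do; the actual implementations (e.g. direction sampling for
# ord != 2, or the truncation strategy) may differ.

def uniform_ball(batch_size, dim, epsilon=1, ord=2):
    """Sample points uniformly from an L_ord ball of radius epsilon (sketch).

    Draws random directions, normalizes them, and scales radii by u^(1/dim)
    so points are uniform in volume rather than concentrated at the center.
    Exact for ord=2; other norms would need a different direction sampler.
    """
    random = numpy.random.randn(batch_size, dim)
    random /= numpy.linalg.norm(random, ord=ord, axis=1, keepdims=True)
    random *= epsilon * numpy.random.uniform(0, 1, (batch_size, 1)) ** (1.0 / dim)
    return random


def truncated_normal(size, lower=-2, upper=2):
    """Standard normal samples restricted to [lower, upper] via rejection (sketch)."""
    samples = numpy.random.normal(size=size)
    mask = numpy.logical_or(samples < lower, samples > upper)
    while numpy.any(mask):
        samples[mask] = numpy.random.normal(size=mask.sum())
        mask = numpy.logical_or(samples < lower, samples > upper)
    return samples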
def train(self): """ Train adversarially. """ split = self.args.batch_size // 2 num_batches = int( math.ceil(self.train_images.shape[0] / self.args.batch_size)) permutation = numpy.random.permutation(self.train_images.shape[0]) for b in range(num_batches): self.scheduler.update(self.epoch, float(b) / num_batches) perm = numpy.take(permutation, range(b * self.args.batch_size, (b + 1) * self.args.batch_size), mode='wrap') batch_images = common.torch.as_variable(self.train_images[perm], self.args.use_gpu) batch_theta = common.torch.as_variable(self.train_theta[perm], self.args.use_gpu) batch_images = batch_images.permute(0, 3, 1, 2) batch_fonts = self.train_codes[perm, 1] batch_classes = self.train_codes[perm, self.args.label_index] batch_code = numpy.concatenate( (common.numpy.one_hot(batch_fonts, self.N_font), common.numpy.one_hot(batch_classes, self.N_class)), axis=1).astype(numpy.float32) batch_code = common.torch.as_variable(batch_code, self.args.use_gpu) batch_classes = common.torch.as_variable(batch_classes, self.args.use_gpu) self.model.eval() assert self.model.training is False if self.args.full_variant: objective = self.objective_class() self.decoder.set_code(batch_code) attack = self.setup_attack(self.decoder_classifier, batch_theta, batch_classes) attack.set_bound(torch.from_numpy(self.min_bound), torch.from_numpy(self.max_bound)) success, perturbations, probabilities, norm, _ = attack.run( objective, self.args.verbose) batch_perturbed_theta = batch_theta + common.torch.as_variable( perturbations, self.args.use_gpu) batch_perturbed_images = self.decoder(batch_perturbed_theta) batch_perturbations = batch_perturbed_images - batch_images self.model.train() assert self.model.training is True output_classes = self.model(batch_perturbed_images) self.scheduler.optimizer.zero_grad() loss = self.loss(batch_classes, output_classes) loss.backward() self.scheduler.optimizer.step() loss = perturbation_loss = loss.item() gradient = torch.mean( torch.abs(list(self.model.parameters())[0].grad)) gradient = gradient.item() error = self.error(batch_classes, output_classes) error = perturbation_error = error.item() else: objective = self.objective_class() self.decoder.set_code(batch_code[split:]) attack = self.setup_attack(self.decoder_classifier, batch_theta[split:], batch_classes[split:]) attack.set_bound(torch.from_numpy(self.min_bound), torch.from_numpy(self.max_bound)) success, perturbations, probabilities, norm, _ = attack.run( objective, self.args.verbose) batch_perturbed_theta = batch_theta[ split:] + common.torch.as_variable(perturbations, self.args.use_gpu) batch_perturbed_images = self.decoder(batch_perturbed_theta) batch_perturbations = batch_perturbed_images - batch_images[ split:] self.model.train() assert self.model.training is True batch_input_images = torch.cat( (batch_images[:split], batch_perturbed_images), dim=0) output_classes = self.model(batch_input_images) self.scheduler.optimizer.zero_grad() loss = self.loss(batch_classes[:split], output_classes[:split]) perturbation_loss = self.loss(batch_classes[split:], output_classes[split:]) l = (loss + perturbation_loss) / 2 l.backward() self.scheduler.optimizer.step() loss = loss.item() perturbation_loss = perturbation_loss.item() gradient = torch.mean( torch.abs(list(self.model.parameters())[0].grad)) gradient = gradient.item() error = self.error(batch_classes[:split], output_classes[:split]) error = error.item() perturbation_error = self.error(batch_classes[split:], output_classes[split:]) perturbation_error = 
perturbation_error.item() iterations = numpy.mean( success[success >= 0]) if numpy.sum(success >= 0) > 0 else -1 norm = numpy.mean( numpy.linalg.norm(perturbations.reshape( perturbations.shape[0], -1), axis=1, ord=self.norm)) success = numpy.sum(success >= 0) / (self.args.batch_size // 2) iteration = self.epoch * num_batches + b + 1 self.train_statistics = numpy.vstack(( self.train_statistics, numpy.array([[ iteration, # iterations iteration * (1 + self.args.max_iterations) * self.args.batch_size, # samples seen min(num_batches, iteration) * self.args.batch_size + iteration * self.args.max_iterations * self.args.batch_size, # unique samples seen loss, error, perturbation_loss, perturbation_error, success, iterations, norm, gradient ]]))) if b % self.args.skip == self.args.skip // 2: log('[Training] %d | %d: %g (%g) %g (%g) [%g]' % ( self.epoch, b, numpy.mean(self.train_statistics[ max(0, iteration - self.args.skip):iteration, 3]), numpy.mean(self.train_statistics[ max(0, iteration - self.args.skip):iteration, 4]), numpy.mean(self.train_statistics[ max(0, iteration - self.args.skip):iteration, 5]), numpy.mean(self.train_statistics[ max(0, iteration - self.args.skip):iteration, 6]), numpy.mean(self.train_statistics[ max(0, iteration - self.args.skip):iteration, -1]), )) log('[Training] %d | %d: %g (%g, %g)' % ( self.epoch, b, numpy.mean(self.train_statistics[ max(0, iteration - self.args.skip):iteration, 7]), numpy.mean(self.train_statistics[ max(0, iteration - self.args.skip):iteration, 8]), numpy.mean(self.train_statistics[ max(0, iteration - self.args.skip):iteration, 9]), )) self.debug('clean.%d.png' % self.epoch, batch_images.permute(0, 2, 3, 1)) self.debug('perturbed.%d.png' % self.epoch, batch_perturbed_images.permute(0, 2, 3, 1)) self.debug('perturbation.%d.png' % self.epoch, batch_perturbations.permute(0, 2, 3, 1), cmap='seismic')
def train(self): """ Train with fair data augmentation. """ self.model.train() assert self.model.training is True split = self.args.batch_size // 2 num_batches = int( math.ceil(self.train_images.shape[0] / self.args.batch_size)) permutation = numpy.random.permutation(self.train_images.shape[0]) for b in range(num_batches): self.scheduler.update(self.epoch, float(b) / num_batches) perm = numpy.take(permutation, range(b * self.args.batch_size, (b + 1) * self.args.batch_size), mode='wrap') batch_images = common.torch.as_variable(self.train_images[perm], self.args.use_gpu) batch_theta = common.torch.as_variable(self.train_theta[perm], self.args.use_gpu) batch_images = batch_images.permute(0, 3, 1, 2) batch_fonts = self.train_codes[perm, 1] batch_classes = self.train_codes[perm, self.args.label_index] batch_code = numpy.concatenate( (common.numpy.one_hot(batch_fonts, self.N_font), common.numpy.one_hot(batch_classes, self.N_class)), axis=1).astype(numpy.float32) batch_code = common.torch.as_variable(batch_code, self.args.use_gpu) batch_classes = common.torch.as_variable(batch_classes, self.args.use_gpu) loss = error = gradient = 0 if self.args.full_variant: for t in range(self.args.max_iterations): if self.args.strong_variant: # Here, we want to uniformly sample all allowed transformations, so that's OK. min_bound = numpy.repeat(self.min_bound.reshape(1, -1), self.args.batch_size, axis=0) max_bound = numpy.repeat(self.max_bound.reshape(1, -1), self.args.batch_size, axis=0) random = numpy.random.uniform( min_bound, max_bound, (batch_theta.size(0), batch_theta.size(1))) batch_perturbed_theta = common.torch.as_variable( random.astype(numpy.float32), self.args.use_gpu) self.decoder.set_code(batch_code) batch_perturbed_images = self.decoder( batch_perturbed_theta) else: random = common.numpy.uniform_ball( batch_theta.size(0), batch_theta.size(1), epsilon=self.args.epsilon, ord=self.norm) batch_perturbed_theta = batch_theta + common.torch.as_variable( random.astype(numpy.float32), self.args.use_gpu) batch_perturbed_theta = torch.min( common.torch.as_variable(self.max_bound, self.args.use_gpu), batch_perturbed_theta) batch_perturbed_theta = torch.max( common.torch.as_variable(self.min_bound, self.args.use_gpu), batch_perturbed_theta) self.decoder.set_code(batch_code) batch_perturbed_images = self.decoder( batch_perturbed_theta) output_classes = self.model(batch_perturbed_images) self.scheduler.optimizer.zero_grad() l = self.loss(batch_classes, output_classes) l.backward() self.scheduler.optimizer.step() loss += l.item() g = torch.mean( torch.abs(list(self.model.parameters())[0].grad)) gradient += g.item() e = self.error(batch_classes, output_classes) error += e.item() batch_perturbations = batch_perturbed_images - batch_images gradient /= self.args.max_iterations loss /= self.args.max_iterations error /= self.args.max_iterations perturbation_loss = loss perturbation_error = error else: output_classes = self.model(batch_images[:split]) self.scheduler.optimizer.zero_grad() l = self.loss(batch_classes[:split], output_classes) l.backward() self.scheduler.optimizer.step() loss = l.item() gradient = torch.mean( torch.abs(list(self.model.parameters())[0].grad)) gradient = gradient.item() e = self.error(batch_classes[:split], output_classes) error = e.item() perturbation_loss = perturbation_error = 0 for t in range(self.args.max_iterations): if self.args.strong_variant: # Again, sampling all possible transformations. 
min_bound = numpy.repeat(self.min_bound.reshape(1, -1), split, axis=0) max_bound = numpy.repeat(self.max_bound.reshape(1, -1), split, axis=0) random = numpy.random.uniform( min_bound, max_bound, (split, batch_theta.size(1))) batch_perturbed_theta = common.torch.as_variable( random.astype(numpy.float32), self.args.use_gpu) self.decoder.set_code(batch_code[split:]) batch_perturbed_images = self.decoder( batch_perturbed_theta) else: random = common.numpy.uniform_ball( split, batch_theta.size(1), epsilon=self.args.epsilon, ord=self.norm) batch_perturbed_theta = batch_theta[ split:] + common.torch.as_variable( random.astype(numpy.float32), self.args.use_gpu) batch_perturbed_theta = torch.min( common.torch.as_variable(self.max_bound, self.args.use_gpu), batch_perturbed_theta) batch_perturbed_theta = torch.max( common.torch.as_variable(self.min_bound, self.args.use_gpu), batch_perturbed_theta) self.decoder.set_code(batch_code[split:]) batch_perturbed_images = self.decoder( batch_perturbed_theta) output_classes = self.model(batch_perturbed_images) self.scheduler.optimizer.zero_grad() l = self.loss(batch_classes[split:], output_classes) l.backward() self.scheduler.optimizer.step() perturbation_loss += l.item() g = torch.mean( torch.abs(list(self.model.parameters())[0].grad)) gradient += g.item() e = self.error(batch_classes[split:], output_classes) perturbation_error += e.item() batch_perturbations = batch_perturbed_images - batch_images[ split:] gradient /= self.args.max_iterations + 1 perturbation_loss /= self.args.max_iterations perturbation_error /= self.args.max_iterations iteration = self.epoch * num_batches + b + 1 self.train_statistics = numpy.vstack(( self.train_statistics, numpy.array([[ iteration, # iterations iteration * (1 + self.args.max_iterations) * self.args.batch_size, # samples seen min(num_batches, iteration) * self.args.batch_size + iteration * self.args.max_iterations * self.args.batch_size, # unique samples seen loss, error, perturbation_loss, perturbation_error, gradient ]]))) if b % self.args.skip == self.args.skip // 2: log('[Training] %d | %d: %g (%g) %g (%g) [%g]' % ( self.epoch, b, numpy.mean(self.train_statistics[ max(0, iteration - self.args.skip):iteration, 3]), numpy.mean(self.train_statistics[ max(0, iteration - self.args.skip):iteration, 4]), numpy.mean(self.train_statistics[ max(0, iteration - self.args.skip):iteration, 5]), numpy.mean(self.train_statistics[ max(0, iteration - self.args.skip):iteration, 6]), numpy.mean(self.train_statistics[ max(0, iteration - self.args.skip):iteration, -1]), )) self.debug('clean.%d.png' % self.epoch, batch_images.permute(0, 2, 3, 1)) self.debug('perturbed.%d.png' % self.epoch, batch_perturbed_images.permute(0, 2, 3, 1)) self.debug('perturbation.%d.png' % self.epoch, batch_perturbations.permute(0, 2, 3, 1), cmap='seismic')
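# common.numpy.one_hot is used above to build the decoder code from font and
# class labels. A minimal sketch under the obvious interpretation (the actual
# helper may differ in dtype or validation):

def one_hot(labels, N):
    """Encode integer labels as rows of a one-hot matrix (sketch)."""
    labels = numpy.asarray(labels, dtype=int)
    encoded = numpy.zeros((labels.shape[0], N), dtype=float)
    encoded[numpy.arange(labels.shape[0]), labels] = 1
    return encoded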
def test(self): """ Test the model. """ self.model.eval() log('[Training] %d set classifier to eval' % self.epoch) assert self.model.training is False loss = error = perturbation_loss = perturbation_error = success = iterations = norm = 0 num_batches = int(math.ceil(self.args.test_samples/self.args.batch_size)) for b in range(num_batches): perm = numpy.take(range(self.args.test_samples), range(b*self.args.batch_size, (b+1)*self.args.batch_size), mode='clip') batch_images = common.torch.as_variable(self.test_images[perm], self.args.use_gpu) batch_classes = common.torch.as_variable(self.test_codes[perm], self.args.use_gpu) batch_images = batch_images.permute(0, 3, 1, 2) output_classes = self.model(batch_images) e = self.loss(batch_classes, output_classes) loss += e.data # 0-dim tensor a = self.error(batch_classes, output_classes) error += a.data loss /= num_batches error /= num_batches num_batches = int(math.ceil(self.args.attack_samples/self.args.batch_size)) assert self.args.attack_samples > 0 and self.args.attack_samples <= self.test_images.shape[0] for b in range(num_batches): perm = numpy.take(range(self.args.attack_samples), range(b*self.args.batch_size, (b+1)*self.args.batch_size), mode='clip') batch_images = common.torch.as_variable(self.test_images[perm], self.args.use_gpu) batch_classes = common.torch.as_variable(self.test_codes[perm], self.args.use_gpu) batch_images = batch_images.permute(0, 3, 1, 2) objective = self.objective_class() attack = self.setup_attack(self.model, batch_images, batch_classes) s, p, _, _, _ = attack.run(objective, False) batch_images = batch_images + common.torch.as_variable(p.astype(numpy.float32), self.args.use_gpu) output_classes = self.model(batch_images) e = self.loss(batch_classes, output_classes) perturbation_loss += e.item() e = self.error(batch_classes, output_classes) perturbation_error += e.item() iterations += numpy.mean(s[s >= 0]) if numpy.sum(s >= 0) > 0 else -1 norm += numpy.mean(numpy.linalg.norm(p.reshape(p.shape[0], -1), axis=1, ord=self.norm)) success += numpy.sum(s >= 0)/self.args.batch_size perturbation_error /= num_batches perturbation_loss /= num_batches success /= num_batches iterations /= num_batches norm /= num_batches log('[Training] %d: test %g (%g) %g (%g)' % (self.epoch, loss, error, perturbation_loss, perturbation_error)) log('[Training] %d: test %g (%g, %g)' % (self.epoch, success, iterations, norm)) num_batches = int(math.ceil(self.train_images.shape[0]/self.args.batch_size)) iteration = self.epoch*num_batches self.test_statistics = numpy.vstack((self.test_statistics, numpy.array([[ iteration, iteration * self.args.batch_size, min(num_batches, iteration) * self.args.batch_size, loss, error, perturbation_loss, perturbation_error, success, iterations, norm ]])))