def test(self):
    """
    Test the model.
    """

    self.model.eval()
    assert self.model.training is False
    log('[Training] %d set classifier to eval' % self.epoch)

    loss = error = 0
    num_batches = int(math.ceil(self.args.test_samples / self.args.batch_size))

    for b in range(num_batches):
        perm = numpy.take(range(self.args.test_samples), range(b * self.args.batch_size, (b + 1) * self.args.batch_size), mode='clip')
        batch_images = common.torch.as_variable(self.test_images[perm], self.args.use_gpu)
        batch_classes = common.torch.as_variable(self.test_codes[perm], self.args.use_gpu)
        batch_images = batch_images.permute(0, 3, 1, 2)

        output_classes = self.model(batch_images)
        e = self.loss(batch_classes, output_classes)
        loss += e.item()
        a = self.error(batch_classes, output_classes)
        error += a.item()

    # Normalize the clean statistics here, before num_batches is redefined
    # for the attack loops below (attack_samples may differ from test_samples).
    loss /= num_batches
    error /= num_batches

    perturbation_loss = perturbation_error = success = iterations = norm = 0
    num_batches = int(math.ceil(self.args.attack_samples / self.args.batch_size))
    assert self.args.attack_samples > 0 and self.args.attack_samples <= self.test_images.shape[0]

    for b in range(num_batches):
        perm = numpy.take(range(self.args.attack_samples), range(b * self.args.batch_size, (b + 1) * self.args.batch_size), mode='clip')
        batch_images = common.torch.as_variable(self.test_images[perm], self.args.use_gpu)
        batch_classes = common.torch.as_variable(self.test_codes[perm], self.args.use_gpu)
        batch_images = batch_images.permute(0, 3, 1, 2)

        objective = self.objective_class()
        attack = self.setup_attack(self.model, batch_images, batch_classes)
        s, p, _, _, _ = attack.run(objective, False)

        batch_images = batch_images + common.torch.as_variable(p.astype(numpy.float32), self.args.use_gpu)
        output_classes = self.model(batch_images)

        e = self.loss(batch_classes, output_classes)
        perturbation_loss += e.item()
        e = self.error(batch_classes, output_classes)
        perturbation_error += e.item()

        iterations += numpy.mean(s[s >= 0]) if numpy.sum(s >= 0) > 0 else -1
        norm += numpy.mean(numpy.linalg.norm(p.reshape(p.shape[0], -1), axis=1, ord=self.norm))
        success += numpy.sum(s >= 0) / self.args.batch_size

    decoder_perturbation_loss = decoder_perturbation_error = decoder_success = decoder_iterations = decoder_norm = 0
    num_batches = int(math.ceil(self.args.attack_samples / self.args.batch_size))
    assert self.args.attack_samples > 0 and self.args.attack_samples <= self.test_images.shape[0]

    for b in range(num_batches):
        perm = numpy.take(range(self.args.attack_samples), range(b * self.args.batch_size, (b + 1) * self.args.batch_size), mode='clip')
        batch_theta = common.torch.as_variable(self.test_theta[perm], self.args.use_gpu)
        batch_classes = common.torch.as_variable(self.test_codes[perm], self.args.use_gpu)

        objective = self.objective_class()
        if isinstance(self.decoder, models.SelectiveDecoder):
            self.decoder.set_code(batch_classes)
        attack = self.setup_decoder_attack(self.decoder_classifier, batch_theta, batch_classes)
        attack.set_bound(torch.from_numpy(self.min_bound), torch.from_numpy(self.max_bound))
        s, p, _, _, _ = attack.run(objective, False)

        perturbations = common.torch.as_variable(p, self.args.use_gpu)
        batch_perturbed_theta = batch_theta + perturbations
        batch_perturbed_images = self.decoder(batch_perturbed_theta)
        output_classes = self.model(batch_perturbed_images)

        # Accumulate into the decoder (on-manifold) statistics.
        e = self.loss(batch_classes, output_classes)
        decoder_perturbation_loss += e.item()
        a = self.error(batch_classes, output_classes)
        decoder_perturbation_error += a.item()

        decoder_iterations += numpy.mean(s[s >= 0]) if numpy.sum(s >= 0) > 0 else -1
        decoder_norm += numpy.mean(numpy.linalg.norm(p.reshape(p.shape[0], -1), axis=1, ord=self.norm))
        decoder_success += numpy.sum(s >= 0) / self.args.batch_size

    perturbation_loss /= num_batches
    perturbation_error /= num_batches
    success /= num_batches
    iterations /= num_batches
    norm /= num_batches
    decoder_perturbation_loss /= num_batches
    decoder_perturbation_error /= num_batches
    decoder_success /= num_batches
    decoder_iterations /= num_batches
    decoder_norm /= num_batches
    log('[Training] %d: test %g (%g) %g (%g) %g (%g)' % (self.epoch, loss, error, perturbation_loss, perturbation_error, decoder_perturbation_loss, decoder_perturbation_error))
    log('[Training] %d: test %g (%g, %g) %g (%g, %g)' % (self.epoch, success, iterations, norm, decoder_success, decoder_iterations, decoder_norm))

    num_batches = int(math.ceil(self.train_images.shape[0] / self.args.batch_size))
    iteration = self.epoch * num_batches
    self.test_statistics = numpy.vstack((
        self.test_statistics,
        numpy.array([[
            iteration,  # iterations
            iteration * (1 + self.args.max_iterations) * self.args.batch_size,  # samples seen
            min(num_batches, iteration) * self.args.batch_size + iteration * self.args.max_iterations * self.args.batch_size,  # unique samples seen
            loss,
            error,
            perturbation_loss,
            perturbation_error,
            decoder_perturbation_loss,
            decoder_perturbation_error,
            success,
            iterations,
            norm,
            decoder_success,
            decoder_iterations,
            decoder_norm,
        ]])))
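
# Illustration (not part of the trainer): the evaluation loops above batch a
# fixed number of samples with numpy.take(..., mode='clip'). Out-of-range
# indices are clamped to the last valid index, so the final partial batch is
# padded by repeating the last sample rather than being dropped. A minimal,
# self-contained demo:
def _demo_clip_batching(num_samples=10, batch_size=4):
    import numpy
    batches = []
    num_batches = -(-num_samples // batch_size)  # ceil division
    for b in range(num_batches):
        # For num_samples=10, batch_size=4, the last batch is [8, 9, 9, 9]:
        # the two out-of-range positions repeat index 9.
        batches.append(numpy.take(range(num_samples), range(b * batch_size, (b + 1) * batch_size), mode='clip'))
    return batches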
def test(self):
    """
    Test the model.
    """

    self.model.eval()
    assert self.model.training is False
    log('[Training] %d set classifier to eval' % self.epoch)

    loss = error = perturbation_loss = perturbation_error = 0
    num_batches = int(math.ceil(self.args.test_samples / self.args.batch_size))

    for b in range(num_batches):
        perm = numpy.take(range(self.args.test_samples), range(b * self.args.batch_size, (b + 1) * self.args.batch_size), mode='clip')
        batch_images = common.torch.as_variable(self.test_images[perm], self.args.use_gpu)
        batch_classes = common.torch.as_variable(self.test_codes[perm], self.args.use_gpu)
        batch_theta = common.torch.as_variable(self.test_theta[perm], self.args.use_gpu)
        batch_images = batch_images.permute(0, 3, 1, 2)

        output_classes = self.model(batch_images)
        e = self.loss(batch_classes, output_classes)
        loss += e.item()
        a = self.error(batch_classes, output_classes)
        error += a.item()

        if self.args.strong_variant:
            random = common.numpy.truncated_normal(batch_theta.size(), lower=-self.args.bound, upper=self.args.bound)
            batch_perturbed_theta = common.torch.as_variable(random.astype(numpy.float32), self.args.use_gpu)
            if isinstance(self.decoder, models.SelectiveDecoder):
                self.decoder.set_code(batch_classes)
            batch_perturbed_images = self.decoder(batch_perturbed_theta)
        else:
            random = common.numpy.uniform_ball(batch_theta.size(0), batch_theta.size(1), epsilon=self.args.epsilon, ord=self.norm)
            batch_perturbed_theta = batch_theta + common.torch.as_variable(random.astype(numpy.float32), self.args.use_gpu)
            batch_perturbed_theta = torch.min(common.torch.as_variable(self.max_bound, self.args.use_gpu), batch_perturbed_theta)
            batch_perturbed_theta = torch.max(common.torch.as_variable(self.min_bound, self.args.use_gpu), batch_perturbed_theta)
            if isinstance(self.decoder, models.SelectiveDecoder):
                self.decoder.set_code(batch_classes)
            batch_perturbed_images = self.decoder(batch_perturbed_theta)

        output_classes = self.model(batch_perturbed_images)
        e = self.loss(batch_classes, output_classes)
        perturbation_loss += e.item()
        e = self.error(batch_classes, output_classes)
        perturbation_error += e.item()

    loss /= num_batches
    error /= num_batches
    perturbation_loss /= num_batches
    perturbation_error /= num_batches
    log('[Training] %d: test %g (%g) %g (%g)' % (self.epoch, loss, error, perturbation_loss, perturbation_error))

    num_batches = int(math.ceil(self.train_images.shape[0] / self.args.batch_size))
    iteration = self.epoch * num_batches
    self.test_statistics = numpy.vstack((
        self.test_statistics,
        numpy.array([[
            iteration,  # iterations
            iteration * (1 + self.args.max_iterations) * self.args.batch_size,  # samples seen
            min(num_batches, iteration) * self.args.batch_size + iteration * self.args.max_iterations * self.args.batch_size,  # unique samples seen
            loss,
            error,
            perturbation_loss,
            perturbation_error
        ]])))
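
# Illustration: common.numpy.uniform_ball is not defined in this file. A
# plausible minimal sketch, assuming it draws `batch` points uniformly from
# the `dim`-dimensional L_p ball of radius epsilon; only ord=2 and
# ord=numpy.inf are sketched, and the actual helper may differ.
def _uniform_ball_sketch(batch, dim, epsilon=1.0, ord=2):
    import numpy
    if ord == numpy.inf:
        # The L_inf ball is an axis-aligned cube, so coordinates are independent.
        return numpy.random.uniform(-epsilon, epsilon, size=(batch, dim))
    assert ord == 2, 'only ord=2 and ord=inf are sketched here'
    # Uniform direction (normalized Gaussian) times a radius distributed as
    # r ~ U^(1/dim), which makes the samples uniform in volume.
    directions = numpy.random.randn(batch, dim)
    directions /= numpy.linalg.norm(directions, axis=1, keepdims=True)
    radii = numpy.random.uniform(0.0, 1.0, size=(batch, 1)) ** (1.0 / dim)
    return epsilon * radii * directions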
def train(self):
    """
    Train adversarially.
    """

    num_batches = int(math.ceil(self.train_images.shape[0] / self.args.batch_size))
    permutation = numpy.random.permutation(self.train_images.shape[0])
    perturbation_permutation = numpy.random.permutation(self.train_images.shape[0])
    if self.args.safe:
        perturbation_permutation = perturbation_permutation[self.train_valid == 1]
    else:
        perturbation_permutation = permutation

    for b in range(num_batches):
        self.scheduler.update(self.epoch, float(b) / num_batches)
        self.model.eval()
        assert self.model.training is False

        objective = self.objective_class()
        split = self.args.batch_size // 2

        if self.args.full_variant:
            perm = numpy.concatenate((
                numpy.take(permutation, range(b * self.args.batch_size, b * self.args.batch_size + split), mode='wrap'),
                numpy.take(perturbation_permutation, range(b * self.args.batch_size + split, (b + 1) * self.args.batch_size), mode='wrap')
            ), axis=0)
            batch_images = common.torch.as_variable(self.train_images[perm], self.args.use_gpu)
            batch_classes = common.torch.as_variable(self.train_codes[perm], self.args.use_gpu)
            batch_theta = common.torch.as_variable(self.train_theta[perm], self.args.use_gpu)
            batch_images = batch_images.permute(0, 3, 1, 2)

            attack = self.setup_attack(self.model, batch_images[:split], batch_classes[:split])
            success, perturbations, _, _, _ = attack.run(objective, self.args.verbose)
            batch_perturbations1 = common.torch.as_variable(perturbations.astype(numpy.float32), self.args.use_gpu)
            batch_perturbed_images1 = batch_images[:split] + batch_perturbations1

            if isinstance(self.decoder, models.SelectiveDecoder):
                self.decoder.set_code(batch_classes[split:])
            attack = self.setup_decoder_attack(self.decoder_classifier, batch_theta[split:], batch_classes[split:])
            attack.set_bound(torch.from_numpy(self.min_bound), torch.from_numpy(self.max_bound))
            decoder_success, decoder_perturbations, probabilities, norm, _ = attack.run(objective, self.args.verbose)

            batch_perturbed_theta = batch_theta[split:] + common.torch.as_variable(decoder_perturbations, self.args.use_gpu)
            batch_perturbed_images2 = self.decoder(batch_perturbed_theta)
            batch_perturbations2 = batch_perturbed_images2 - batch_images[split:]

            batch_input_images = torch.cat((batch_perturbed_images1, batch_perturbed_images2), dim=0)

            self.model.train()
            assert self.model.training is True

            output_classes = self.model(batch_input_images)

            self.scheduler.optimizer.zero_grad()
            perturbation_loss = self.loss(batch_classes[:split], output_classes[:split])
            decoder_perturbation_loss = self.loss(batch_classes[split:], output_classes[split:])
            loss = (perturbation_loss + decoder_perturbation_loss) / 2
            loss.backward()
            self.scheduler.optimizer.step()
            loss = loss.item()
            perturbation_loss = perturbation_loss.item()
            decoder_perturbation_loss = decoder_perturbation_loss.item()

            gradient = torch.mean(torch.abs(list(self.model.parameters())[0].grad))
            gradient = gradient.item()

            perturbation_error = self.error(batch_classes[:split], output_classes[:split])
            perturbation_error = perturbation_error.item()
            decoder_perturbation_error = self.error(batch_classes[split:], output_classes[split:])
            decoder_perturbation_error = decoder_perturbation_error.item()
            error = (perturbation_error + decoder_perturbation_error) / 2
        else:
            perm = numpy.concatenate((
                numpy.take(perturbation_permutation, range(b * self.args.batch_size + split + split // 2, (b + 1) * self.args.batch_size), mode='wrap'),
                numpy.take(permutation, range(b * self.args.batch_size, b * self.args.batch_size + split + split // 2), mode='wrap'),
            ), axis=0)
            batch_images = common.torch.as_variable(self.train_images[perm], self.args.use_gpu)
            batch_classes = common.torch.as_variable(self.train_codes[perm], self.args.use_gpu)
            batch_theta = common.torch.as_variable(self.train_theta[perm], self.args.use_gpu)
            batch_images = batch_images.permute(0, 3, 1, 2)

            attack = self.setup_attack(self.model, batch_images[split // 2:split], batch_classes[split // 2:split])
            success, perturbations, _, _, _ = attack.run(objective, self.args.verbose)
            batch_perturbations1 = common.torch.as_variable(perturbations.astype(numpy.float32), self.args.use_gpu)
            batch_perturbed_images1 = batch_images[split // 2:split] + batch_perturbations1

            if isinstance(self.decoder, models.SelectiveDecoder):
                self.decoder.set_code(batch_classes[:split // 2])
            attack = self.setup_decoder_attack(self.decoder_classifier, batch_theta[:split // 2], batch_classes[:split // 2])
            attack.set_bound(torch.from_numpy(self.min_bound), torch.from_numpy(self.max_bound))
            decoder_success, decoder_perturbations, probabilities, norm, _ = attack.run(objective, self.args.verbose)

            batch_perturbed_theta = batch_theta[:split // 2] + common.torch.as_variable(decoder_perturbations, self.args.use_gpu)
            batch_perturbed_images2 = self.decoder(batch_perturbed_theta)
            batch_perturbations2 = batch_perturbed_images2 - batch_images[:split // 2]

            batch_input_images = torch.cat((batch_perturbed_images2, batch_perturbed_images1, batch_images[split:]), dim=0)

            self.model.train()
            assert self.model.training is True

            output_classes = self.model(batch_input_images)

            self.scheduler.optimizer.zero_grad()
            loss = self.loss(batch_classes[split:], output_classes[split:])
            perturbation_loss = self.loss(batch_classes[split // 2:split], output_classes[split // 2:split])
            decoder_perturbation_loss = self.loss(batch_classes[:split // 2], output_classes[:split // 2])
            l = (loss + perturbation_loss + decoder_perturbation_loss) / 3
            l.backward()
            self.scheduler.optimizer.step()
            loss = loss.item()
            perturbation_loss = perturbation_loss.item()
            decoder_perturbation_loss = decoder_perturbation_loss.item()

            gradient = torch.mean(torch.abs(list(self.model.parameters())[0].grad))
            gradient = gradient.item()

            error = self.error(batch_classes[split:], output_classes[split:])
            error = error.item()
            perturbation_error = self.error(batch_classes[split // 2:split], output_classes[split // 2:split])
            perturbation_error = perturbation_error.item()
            decoder_perturbation_error = self.error(batch_classes[:split // 2], output_classes[:split // 2])
            decoder_perturbation_error = decoder_perturbation_error.item()

        iterations = numpy.mean(success[success >= 0]) if numpy.sum(success >= 0) > 0 else -1
        norm = numpy.mean(numpy.linalg.norm(perturbations.reshape(perturbations.shape[0], -1), axis=1, ord=self.norm))
        success = numpy.sum(success >= 0) / self.args.batch_size
        decoder_iterations = numpy.mean(decoder_success[decoder_success >= 0]) if numpy.sum(decoder_success >= 0) > 0 else -1
        decoder_norm = numpy.mean(numpy.linalg.norm(decoder_perturbations, axis=1, ord=self.norm))
        decoder_success = numpy.sum(decoder_success >= 0) / self.args.batch_size

        iteration = self.epoch * num_batches + b + 1
        self.train_statistics = numpy.vstack((
            self.train_statistics,
            numpy.array([[
                iteration,  # iterations
                iteration * (1 + self.args.max_iterations) * self.args.batch_size,  # samples seen
                min(num_batches, iteration) * self.args.batch_size + iteration * self.args.max_iterations * self.args.batch_size,  # unique samples seen
                loss,
                error,
                perturbation_loss,
                perturbation_error,
                decoder_perturbation_loss,
                decoder_perturbation_error,
                success,
                iterations,
                norm,
                decoder_success,
                decoder_iterations,
                decoder_norm,
                gradient
            ]])))

        if b % self.args.skip == self.args.skip // 2:
            log('[Training] %d | %d: %g (%g) %g (%g) %g (%g) [%g]' % (
                self.epoch,
                b,
                numpy.mean(self.train_statistics[max(0, iteration - self.args.skip):iteration, 3]),
                numpy.mean(self.train_statistics[max(0, iteration - self.args.skip):iteration, 4]),
                numpy.mean(self.train_statistics[max(0, iteration - self.args.skip):iteration, 5]),
                numpy.mean(self.train_statistics[max(0, iteration - self.args.skip):iteration, 6]),
                numpy.mean(self.train_statistics[max(0, iteration - self.args.skip):iteration, 7]),
                numpy.mean(self.train_statistics[max(0, iteration - self.args.skip):iteration, 8]),
                numpy.mean(self.train_statistics[max(0, iteration - self.args.skip):iteration, -1]),
            ))
            log('[Training] %d | %d: %g (%g, %g) %g (%g, %g)' % (
                self.epoch,
                b,
                numpy.mean(self.train_statistics[max(0, iteration - self.args.skip):iteration, 9]),
                numpy.mean(self.train_statistics[max(0, iteration - self.args.skip):iteration, 10]),
                numpy.mean(self.train_statistics[max(0, iteration - self.args.skip):iteration, 11]),
                numpy.mean(self.train_statistics[max(0, iteration - self.args.skip):iteration, 12]),
                numpy.mean(self.train_statistics[max(0, iteration - self.args.skip):iteration, 13]),
                numpy.mean(self.train_statistics[max(0, iteration - self.args.skip):iteration, 14]),
            ))

    self.debug('clean.%d.png' % self.epoch, batch_images.permute(0, 2, 3, 1))
    self.debug('perturbed.%d.png' % self.epoch, batch_perturbed_images1.permute(0, 2, 3, 1))
    self.debug('perturbed2.%d.png' % self.epoch, batch_perturbed_images2.permute(0, 2, 3, 1))
    self.debug('perturbation.%d.png' % self.epoch, batch_perturbations1.permute(0, 2, 3, 1), cmap='seismic')
    self.debug('perturbation2.%d.png' % self.epoch, batch_perturbations2.permute(0, 2, 3, 1), cmap='seismic')
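
# Illustration: the bracketed [%g] value in the log lines above is the mean
# absolute gradient of the first parameter tensor (typically the first
# layer's weights), used as a cheap proxy for vanishing or exploding
# gradients. A standalone sketch of the same computation:
def _first_parameter_gradient(model):
    import torch
    first = list(model.parameters())[0]
    assert first.grad is not None, 'call backward() before reading gradients'
    return torch.mean(torch.abs(first.grad)).item()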
def train(self):
    """
    Train with fair data augmentation.
    """

    self.model.train()
    assert self.model.training is True
    assert self.decoder.training is False

    split = self.args.batch_size // 2
    num_batches = int(math.ceil(self.train_images.shape[0] / self.args.batch_size))
    permutation = numpy.random.permutation(self.train_images.shape[0])

    for b in range(num_batches):
        self.scheduler.update(self.epoch, float(b) / num_batches)

        perm = numpy.take(permutation, range(b * self.args.batch_size, (b + 1) * self.args.batch_size), mode='wrap')
        batch_images = common.torch.as_variable(self.train_images[perm], self.args.use_gpu)
        batch_theta = common.torch.as_variable(self.train_theta[perm], self.args.use_gpu)
        batch_classes = common.torch.as_variable(self.train_codes[perm], self.args.use_gpu)
        batch_images = batch_images.permute(0, 3, 1, 2)

        loss = error = gradient = 0

        if self.args.full_variant:
            for t in range(self.args.max_iterations):
                if self.args.strong_variant:
                    # Here we want to sample from a truncated Gaussian.
                    random = common.numpy.truncated_normal(batch_theta.size(), lower=-self.args.bound, upper=self.args.bound)
                    batch_perturbed_theta = common.torch.as_variable(random.astype(numpy.float32), self.args.use_gpu)
                    if isinstance(self.decoder, models.SelectiveDecoder):
                        self.decoder.set_code(batch_classes)
                    batch_perturbed_images = self.decoder(batch_perturbed_theta)
                else:
                    random = common.numpy.uniform_ball(batch_theta.size(0), batch_theta.size(1), epsilon=self.args.epsilon, ord=self.norm)
                    batch_perturbed_theta = batch_theta + common.torch.as_variable(random.astype(numpy.float32), self.args.use_gpu)
                    batch_perturbed_theta = torch.min(common.torch.as_variable(self.max_bound, self.args.use_gpu), batch_perturbed_theta)
                    batch_perturbed_theta = torch.max(common.torch.as_variable(self.min_bound, self.args.use_gpu), batch_perturbed_theta)
                    if isinstance(self.decoder, models.SelectiveDecoder):
                        self.decoder.set_code(batch_classes)
                    batch_perturbed_images = self.decoder(batch_perturbed_theta)

                output_classes = self.model(batch_perturbed_images)

                self.scheduler.optimizer.zero_grad()
                l = self.loss(batch_classes, output_classes)
                l.backward()
                self.scheduler.optimizer.step()
                loss += l.item()

                g = torch.mean(torch.abs(list(self.model.parameters())[0].grad))
                gradient += g.item()

                e = self.error(batch_classes, output_classes)
                error += e.item()

            batch_perturbations = batch_perturbed_images - batch_images
            gradient /= self.args.max_iterations
            loss /= self.args.max_iterations
            error /= self.args.max_iterations
            perturbation_loss = loss
            perturbation_error = error
        else:
            output_classes = self.model(batch_images[:split])

            self.scheduler.optimizer.zero_grad()
            l = self.loss(batch_classes[:split], output_classes)
            l.backward()
            self.scheduler.optimizer.step()
            loss = l.item()

            gradient = torch.mean(torch.abs(list(self.model.parameters())[0].grad))
            gradient = gradient.item()

            e = self.error(batch_classes[:split], output_classes)
            error = e.item()

            perturbation_loss = perturbation_error = 0
            for t in range(self.args.max_iterations):
                if self.args.strong_variant:
                    # Here we want to sample from a truncated Gaussian.
                    random = common.numpy.truncated_normal([split, batch_theta.size(1)], lower=-self.args.bound, upper=self.args.bound)
                    batch_perturbed_theta = common.torch.as_variable(random.astype(numpy.float32), self.args.use_gpu)
                    if isinstance(self.decoder, models.SelectiveDecoder):
                        self.decoder.set_code(batch_classes[split:])
                    batch_perturbed_images = self.decoder(batch_perturbed_theta)
                else:
                    random = common.numpy.uniform_ball(split, batch_theta.size(1), epsilon=self.args.epsilon, ord=self.norm)
                    batch_perturbed_theta = batch_theta[split:] + common.torch.as_variable(random.astype(numpy.float32), self.args.use_gpu)
                    batch_perturbed_theta = torch.min(common.torch.as_variable(self.max_bound, self.args.use_gpu), batch_perturbed_theta)
                    batch_perturbed_theta = torch.max(common.torch.as_variable(self.min_bound, self.args.use_gpu), batch_perturbed_theta)
                    if isinstance(self.decoder, models.SelectiveDecoder):
                        self.decoder.set_code(batch_classes[split:])
                    batch_perturbed_images = self.decoder(batch_perturbed_theta)

                output_classes = self.model(batch_perturbed_images)

                self.scheduler.optimizer.zero_grad()
                l = self.loss(batch_classes[split:], output_classes)
                l.backward()
                self.scheduler.optimizer.step()
                perturbation_loss += l.item()

                g = torch.mean(torch.abs(list(self.model.parameters())[0].grad))
                gradient += g.item()

                e = self.error(batch_classes[split:], output_classes)
                perturbation_error += e.item()

            batch_perturbations = batch_perturbed_images - batch_images[split:]
            gradient /= self.args.max_iterations + 1
            perturbation_loss /= self.args.max_iterations
            perturbation_error /= self.args.max_iterations

        iteration = self.epoch * num_batches + b + 1
        self.train_statistics = numpy.vstack((
            self.train_statistics,
            numpy.array([[
                iteration,  # iterations
                iteration * (1 + self.args.max_iterations) * self.args.batch_size,  # samples seen
                min(num_batches, iteration) * self.args.batch_size + iteration * self.args.max_iterations * self.args.batch_size,  # unique samples seen
                loss,
                error,
                perturbation_loss,
                perturbation_error,
                gradient
            ]])))

        if b % self.args.skip == self.args.skip // 2:
            log('[Training] %d | %d: %g (%g) %g (%g) [%g]' % (
                self.epoch,
                b,
                numpy.mean(self.train_statistics[max(0, iteration - self.args.skip):iteration, 3]),
                numpy.mean(self.train_statistics[max(0, iteration - self.args.skip):iteration, 4]),
                numpy.mean(self.train_statistics[max(0, iteration - self.args.skip):iteration, 5]),
                numpy.mean(self.train_statistics[max(0, iteration - self.args.skip):iteration, 6]),
                numpy.mean(self.train_statistics[max(0, iteration - self.args.skip):iteration, -1]),
            ))

    self.debug('clean.%d.png' % self.epoch, batch_images.permute(0, 2, 3, 1))
    self.debug('perturbed.%d.png' % self.epoch, batch_perturbed_images.permute(0, 2, 3, 1))
    self.debug('perturbation.%d.png' % self.epoch, batch_perturbations.permute(0, 2, 3, 1), cmap='seismic')
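
# Illustration: common.numpy.truncated_normal is not shown here. A minimal
# rejection-sampling sketch of what it plausibly computes -- standard-normal
# draws confined to [lower, upper]; the real helper may instead use
# scipy.stats.truncnorm, so treat this as an assumption.
def _truncated_normal_sketch(size, lower=-1.0, upper=1.0):
    import numpy
    samples = numpy.random.randn(*tuple(size))
    outside = (samples < lower) | (samples > upper)
    while numpy.any(outside):
        # Redraw only the rejected entries until all samples lie in range.
        samples[outside] = numpy.random.randn(int(numpy.sum(outside)))
        outside = (samples < lower) | (samples > upper)
    return samples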
def train(self):
    """
    Train adversarially.
    """

    split = self.args.batch_size // 2
    num_batches = int(math.ceil(self.train_images.shape[0] / self.args.batch_size))
    permutation = numpy.random.permutation(self.train_images.shape[0])

    for b in range(num_batches):
        self.scheduler.update(self.epoch, float(b) / num_batches)

        perm = numpy.take(permutation, range(b * self.args.batch_size, (b + 1) * self.args.batch_size), mode='wrap')
        batch_images = common.torch.as_variable(self.train_images[perm], self.args.use_gpu)
        batch_theta = common.torch.as_variable(self.train_theta[perm], self.args.use_gpu)
        batch_images = batch_images.permute(0, 3, 1, 2)

        batch_fonts = self.train_codes[perm, 1]
        batch_classes = self.train_codes[perm, self.args.label_index]
        batch_code = numpy.concatenate((common.numpy.one_hot(batch_fonts, self.N_font), common.numpy.one_hot(batch_classes, self.N_class)), axis=1).astype(numpy.float32)
        batch_code = common.torch.as_variable(batch_code, self.args.use_gpu)
        batch_classes = common.torch.as_variable(batch_classes, self.args.use_gpu)

        self.model.eval()
        assert self.model.training is False

        if self.args.full_variant:
            objective = self.objective_class()
            self.decoder.set_code(batch_code)
            attack = self.setup_attack(self.decoder_classifier, batch_theta, batch_classes)
            attack.set_bound(torch.from_numpy(self.min_bound), torch.from_numpy(self.max_bound))
            success, perturbations, probabilities, norm, _ = attack.run(objective, self.args.verbose)

            batch_perturbed_theta = batch_theta + common.torch.as_variable(perturbations, self.args.use_gpu)
            batch_perturbed_images = self.decoder(batch_perturbed_theta)
            batch_perturbations = batch_perturbed_images - batch_images

            self.model.train()
            assert self.model.training is True

            output_classes = self.model(batch_perturbed_images)

            self.scheduler.optimizer.zero_grad()
            loss = self.loss(batch_classes, output_classes)
            loss.backward()
            self.scheduler.optimizer.step()
            loss = perturbation_loss = loss.item()

            gradient = torch.mean(torch.abs(list(self.model.parameters())[0].grad))
            gradient = gradient.item()

            error = self.error(batch_classes, output_classes)
            error = perturbation_error = error.item()
        else:
            objective = self.objective_class()
            self.decoder.set_code(batch_code[split:])
            attack = self.setup_attack(self.decoder_classifier, batch_theta[split:], batch_classes[split:])
            attack.set_bound(torch.from_numpy(self.min_bound), torch.from_numpy(self.max_bound))
            success, perturbations, probabilities, norm, _ = attack.run(objective, self.args.verbose)

            batch_perturbed_theta = batch_theta[split:] + common.torch.as_variable(perturbations, self.args.use_gpu)
            batch_perturbed_images = self.decoder(batch_perturbed_theta)
            batch_perturbations = batch_perturbed_images - batch_images[split:]

            self.model.train()
            assert self.model.training is True

            batch_input_images = torch.cat((batch_images[:split], batch_perturbed_images), dim=0)
            output_classes = self.model(batch_input_images)

            self.scheduler.optimizer.zero_grad()
            loss = self.loss(batch_classes[:split], output_classes[:split])
            perturbation_loss = self.loss(batch_classes[split:], output_classes[split:])
            l = (loss + perturbation_loss) / 2
            l.backward()
            self.scheduler.optimizer.step()
            loss = loss.item()
            perturbation_loss = perturbation_loss.item()

            gradient = torch.mean(torch.abs(list(self.model.parameters())[0].grad))
            gradient = gradient.item()

            error = self.error(batch_classes[:split], output_classes[:split])
            error = error.item()
            perturbation_error = self.error(batch_classes[split:], output_classes[split:])
            perturbation_error = perturbation_error.item()

        iterations = numpy.mean(success[success >= 0]) if numpy.sum(success >= 0) > 0 else -1
        norm = numpy.mean(numpy.linalg.norm(perturbations.reshape(perturbations.shape[0], -1), axis=1, ord=self.norm))
        success = numpy.sum(success >= 0) / (self.args.batch_size // 2)

        iteration = self.epoch * num_batches + b + 1
        self.train_statistics = numpy.vstack((
            self.train_statistics,
            numpy.array([[
                iteration,  # iterations
                iteration * (1 + self.args.max_iterations) * self.args.batch_size,  # samples seen
                min(num_batches, iteration) * self.args.batch_size + iteration * self.args.max_iterations * self.args.batch_size,  # unique samples seen
                loss,
                error,
                perturbation_loss,
                perturbation_error,
                success,
                iterations,
                norm,
                gradient
            ]])))

        if b % self.args.skip == self.args.skip // 2:
            log('[Training] %d | %d: %g (%g) %g (%g) [%g]' % (
                self.epoch,
                b,
                numpy.mean(self.train_statistics[max(0, iteration - self.args.skip):iteration, 3]),
                numpy.mean(self.train_statistics[max(0, iteration - self.args.skip):iteration, 4]),
                numpy.mean(self.train_statistics[max(0, iteration - self.args.skip):iteration, 5]),
                numpy.mean(self.train_statistics[max(0, iteration - self.args.skip):iteration, 6]),
                numpy.mean(self.train_statistics[max(0, iteration - self.args.skip):iteration, -1]),
            ))
            log('[Training] %d | %d: %g (%g, %g)' % (
                self.epoch,
                b,
                numpy.mean(self.train_statistics[max(0, iteration - self.args.skip):iteration, 7]),
                numpy.mean(self.train_statistics[max(0, iteration - self.args.skip):iteration, 8]),
                numpy.mean(self.train_statistics[max(0, iteration - self.args.skip):iteration, 9]),
            ))

    self.debug('clean.%d.png' % self.epoch, batch_images.permute(0, 2, 3, 1))
    self.debug('perturbed.%d.png' % self.epoch, batch_perturbed_images.permute(0, 2, 3, 1))
    self.debug('perturbation.%d.png' % self.epoch, batch_perturbations.permute(0, 2, 3, 1), cmap='seismic')
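
# Illustration: attack.run is assumed to return, per sample, the iteration at
# which the attack first succeeded (negative for failures); the statistics
# above reduce that vector as sketched below. The function name is
# illustrative, not part of the trainer.
def _attack_statistics_sketch(success, perturbations, batch_size, ord=2):
    import numpy
    # Mean iterations over successful attacks only; -1 if none succeeded.
    iterations = numpy.mean(success[success >= 0]) if numpy.sum(success >= 0) > 0 else -1
    # Mean perturbation norm, flattening each perturbation to a vector.
    norm = numpy.mean(numpy.linalg.norm(perturbations.reshape(perturbations.shape[0], -1), axis=1, ord=ord))
    # Fraction of samples for which the attack succeeded.
    success_rate = numpy.sum(success >= 0) / float(batch_size)
    return success_rate, iterations, norm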
def test(self):
    """
    Test the model.
    """

    self.model.eval()
    assert self.model.training is False
    log('[Training] %d set classifier to eval' % self.epoch)

    loss = error = perturbation_loss = perturbation_error = 0
    num_batches = int(math.ceil(self.args.test_samples / self.args.batch_size))

    for b in range(num_batches):
        perm = numpy.take(range(self.args.test_samples), range(b * self.args.batch_size, (b + 1) * self.args.batch_size), mode='clip')
        batch_images = common.torch.as_variable(self.test_images[perm], self.args.use_gpu)
        batch_theta = common.torch.as_variable(self.test_theta[perm], self.args.use_gpu)
        batch_images = batch_images.permute(0, 3, 1, 2)

        batch_fonts = self.test_codes[perm, 1]
        batch_classes = self.test_codes[perm, self.args.label_index]
        batch_code = numpy.concatenate((common.numpy.one_hot(batch_fonts, self.N_font), common.numpy.one_hot(batch_classes, self.N_class)), axis=1).astype(numpy.float32)
        batch_code = common.torch.as_variable(batch_code, self.args.use_gpu)
        batch_classes = common.torch.as_variable(batch_classes, self.args.use_gpu)

        output_classes = self.model(batch_images)
        e = self.loss(batch_classes, output_classes)
        loss += e.item()
        a = self.error(batch_classes, output_classes)
        error += a.item()

        if self.args.strong_variant:
            min_bound = numpy.repeat(self.min_bound.reshape(1, -1), batch_theta.size(0), axis=0)
            max_bound = numpy.repeat(self.max_bound.reshape(1, -1), batch_theta.size(0), axis=0)
            random = numpy.random.uniform(min_bound, max_bound, (batch_theta.size(0), batch_theta.size(1)))
            batch_perturbed_theta = common.torch.as_variable(random.astype(numpy.float32), self.args.use_gpu)
            self.decoder.set_code(batch_code)
            batch_perturbed_images = self.decoder(batch_perturbed_theta)
        else:
            random = common.numpy.uniform_ball(batch_theta.size(0), batch_theta.size(1), epsilon=self.args.epsilon, ord=self.norm)
            batch_perturbed_theta = batch_theta + common.torch.as_variable(random.astype(numpy.float32), self.args.use_gpu)
            batch_perturbed_theta = torch.min(common.torch.as_variable(self.max_bound, self.args.use_gpu), batch_perturbed_theta)
            batch_perturbed_theta = torch.max(common.torch.as_variable(self.min_bound, self.args.use_gpu), batch_perturbed_theta)
            self.decoder.set_code(batch_code)
            batch_perturbed_images = self.decoder(batch_perturbed_theta)

        output_classes = self.model(batch_perturbed_images)
        l = self.loss(batch_classes, output_classes)
        perturbation_loss += l.item()
        e = self.error(batch_classes, output_classes)
        perturbation_error += e.item()

    loss /= num_batches
    error /= num_batches
    perturbation_loss /= num_batches
    perturbation_error /= num_batches
    log('[Training] %d: test %g (%g) %g (%g)' % (self.epoch, loss, error, perturbation_loss, perturbation_error))

    num_batches = int(math.ceil(self.train_images.shape[0] / self.args.batch_size))
    iteration = self.epoch * num_batches
    self.test_statistics = numpy.vstack((
        self.test_statistics,
        numpy.array([[
            iteration,  # iterations
            iteration * (1 + self.args.max_iterations) * self.args.batch_size,  # samples seen
            min(num_batches, iteration) * self.args.batch_size + iteration * self.args.max_iterations * self.args.batch_size,  # unique samples seen
            loss,
            error,
            perturbation_loss,
            perturbation_error
        ]])))
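
# Illustration: common.numpy.one_hot is assumed to map integer labels to
# one-hot rows; the test above concatenates a font one-hot and a class
# one-hot into a single (N_font + N_class)-dimensional decoder code. A
# minimal numpy sketch of that assumed behaviour:
def _one_hot_sketch(labels, N):
    import numpy
    # Indexing the identity matrix selects one one-hot row per label.
    return numpy.eye(N, dtype=numpy.float32)[numpy.asarray(labels, dtype=int)]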
def train(self):
    """
    Train with fair data augmentation.
    """

    self.model.train()
    assert self.model.training is True

    split = self.args.batch_size // 2
    num_batches = int(math.ceil(self.train_images.shape[0] / self.args.batch_size))
    permutation = numpy.random.permutation(self.train_images.shape[0])

    for b in range(num_batches):
        self.scheduler.update(self.epoch, float(b) / num_batches)

        perm = numpy.take(permutation, range(b * self.args.batch_size, (b + 1) * self.args.batch_size), mode='wrap')
        batch_images = common.torch.as_variable(self.train_images[perm], self.args.use_gpu)
        batch_theta = common.torch.as_variable(self.train_theta[perm], self.args.use_gpu)
        batch_images = batch_images.permute(0, 3, 1, 2)

        batch_fonts = self.train_codes[perm, 1]
        batch_classes = self.train_codes[perm, self.args.label_index]
        batch_code = numpy.concatenate((common.numpy.one_hot(batch_fonts, self.N_font), common.numpy.one_hot(batch_classes, self.N_class)), axis=1).astype(numpy.float32)
        batch_code = common.torch.as_variable(batch_code, self.args.use_gpu)
        batch_classes = common.torch.as_variable(batch_classes, self.args.use_gpu)

        loss = error = gradient = 0

        if self.args.full_variant:
            for t in range(self.args.max_iterations):
                if self.args.strong_variant:
                    # Here, we want to uniformly sample all allowed transformations, so that's OK.
                    min_bound = numpy.repeat(self.min_bound.reshape(1, -1), self.args.batch_size, axis=0)
                    max_bound = numpy.repeat(self.max_bound.reshape(1, -1), self.args.batch_size, axis=0)
                    random = numpy.random.uniform(min_bound, max_bound, (batch_theta.size(0), batch_theta.size(1)))
                    batch_perturbed_theta = common.torch.as_variable(random.astype(numpy.float32), self.args.use_gpu)
                    self.decoder.set_code(batch_code)
                    batch_perturbed_images = self.decoder(batch_perturbed_theta)
                else:
                    random = common.numpy.uniform_ball(batch_theta.size(0), batch_theta.size(1), epsilon=self.args.epsilon, ord=self.norm)
                    batch_perturbed_theta = batch_theta + common.torch.as_variable(random.astype(numpy.float32), self.args.use_gpu)
                    batch_perturbed_theta = torch.min(common.torch.as_variable(self.max_bound, self.args.use_gpu), batch_perturbed_theta)
                    batch_perturbed_theta = torch.max(common.torch.as_variable(self.min_bound, self.args.use_gpu), batch_perturbed_theta)
                    self.decoder.set_code(batch_code)
                    batch_perturbed_images = self.decoder(batch_perturbed_theta)

                output_classes = self.model(batch_perturbed_images)

                self.scheduler.optimizer.zero_grad()
                l = self.loss(batch_classes, output_classes)
                l.backward()
                self.scheduler.optimizer.step()
                loss += l.item()

                g = torch.mean(torch.abs(list(self.model.parameters())[0].grad))
                gradient += g.item()

                e = self.error(batch_classes, output_classes)
                error += e.item()

            batch_perturbations = batch_perturbed_images - batch_images
            gradient /= self.args.max_iterations
            loss /= self.args.max_iterations
            error /= self.args.max_iterations
            perturbation_loss = loss
            perturbation_error = error
        else:
            output_classes = self.model(batch_images[:split])

            self.scheduler.optimizer.zero_grad()
            l = self.loss(batch_classes[:split], output_classes)
            l.backward()
            self.scheduler.optimizer.step()
            loss = l.item()

            gradient = torch.mean(torch.abs(list(self.model.parameters())[0].grad))
            gradient = gradient.item()

            e = self.error(batch_classes[:split], output_classes)
            error = e.item()

            perturbation_loss = perturbation_error = 0
            for t in range(self.args.max_iterations):
                if self.args.strong_variant:
                    # Again, sampling all possible transformations.
                    min_bound = numpy.repeat(self.min_bound.reshape(1, -1), split, axis=0)
                    max_bound = numpy.repeat(self.max_bound.reshape(1, -1), split, axis=0)
                    random = numpy.random.uniform(min_bound, max_bound, (split, batch_theta.size(1)))
                    batch_perturbed_theta = common.torch.as_variable(random.astype(numpy.float32), self.args.use_gpu)
                    self.decoder.set_code(batch_code[split:])
                    batch_perturbed_images = self.decoder(batch_perturbed_theta)
                else:
                    random = common.numpy.uniform_ball(split, batch_theta.size(1), epsilon=self.args.epsilon, ord=self.norm)
                    batch_perturbed_theta = batch_theta[split:] + common.torch.as_variable(random.astype(numpy.float32), self.args.use_gpu)
                    batch_perturbed_theta = torch.min(common.torch.as_variable(self.max_bound, self.args.use_gpu), batch_perturbed_theta)
                    batch_perturbed_theta = torch.max(common.torch.as_variable(self.min_bound, self.args.use_gpu), batch_perturbed_theta)
                    self.decoder.set_code(batch_code[split:])
                    batch_perturbed_images = self.decoder(batch_perturbed_theta)

                output_classes = self.model(batch_perturbed_images)

                self.scheduler.optimizer.zero_grad()
                l = self.loss(batch_classes[split:], output_classes)
                l.backward()
                self.scheduler.optimizer.step()
                perturbation_loss += l.item()

                g = torch.mean(torch.abs(list(self.model.parameters())[0].grad))
                gradient += g.item()

                e = self.error(batch_classes[split:], output_classes)
                perturbation_error += e.item()

            batch_perturbations = batch_perturbed_images - batch_images[split:]
            gradient /= self.args.max_iterations + 1
            perturbation_loss /= self.args.max_iterations
            perturbation_error /= self.args.max_iterations

        iteration = self.epoch * num_batches + b + 1
        self.train_statistics = numpy.vstack((
            self.train_statistics,
            numpy.array([[
                iteration,  # iterations
                iteration * (1 + self.args.max_iterations) * self.args.batch_size,  # samples seen
                min(num_batches, iteration) * self.args.batch_size + iteration * self.args.max_iterations * self.args.batch_size,  # unique samples seen
                loss,
                error,
                perturbation_loss,
                perturbation_error,
                gradient
            ]])))

        if b % self.args.skip == self.args.skip // 2:
            log('[Training] %d | %d: %g (%g) %g (%g) [%g]' % (
                self.epoch,
                b,
                numpy.mean(self.train_statistics[max(0, iteration - self.args.skip):iteration, 3]),
                numpy.mean(self.train_statistics[max(0, iteration - self.args.skip):iteration, 4]),
                numpy.mean(self.train_statistics[max(0, iteration - self.args.skip):iteration, 5]),
                numpy.mean(self.train_statistics[max(0, iteration - self.args.skip):iteration, 6]),
                numpy.mean(self.train_statistics[max(0, iteration - self.args.skip):iteration, -1]),
            ))

    self.debug('clean.%d.png' % self.epoch, batch_images.permute(0, 2, 3, 1))
    self.debug('perturbed.%d.png' % self.epoch, batch_perturbed_images.permute(0, 2, 3, 1))
    self.debug('perturbation.%d.png' % self.epoch, batch_perturbations.permute(0, 2, 3, 1), cmap='seismic')
def test(self):
    """
    Test the model.
    """

    self.model.eval()
    assert self.model.training is False
    log('[Training] %d set classifier to eval' % self.epoch)

    loss = error = perturbation_loss = perturbation_error = success = iterations = norm = 0
    num_batches = int(math.ceil(self.args.test_samples / self.args.batch_size))

    for b in range(num_batches):
        perm = numpy.take(range(self.args.test_samples), range(b * self.args.batch_size, (b + 1) * self.args.batch_size), mode='clip')
        batch_images = common.torch.as_variable(self.test_images[perm], self.args.use_gpu)
        batch_classes = common.torch.as_variable(self.test_codes[perm], self.args.use_gpu)
        batch_images = batch_images.permute(0, 3, 1, 2)

        output_classes = self.model(batch_images)
        e = self.loss(batch_classes, output_classes)
        loss += e.item()
        a = self.error(batch_classes, output_classes)
        error += a.item()

    loss /= num_batches
    error /= num_batches

    num_batches = int(math.ceil(self.args.attack_samples / self.args.batch_size))
    assert self.args.attack_samples > 0 and self.args.attack_samples <= self.test_images.shape[0]

    for b in range(num_batches):
        perm = numpy.take(range(self.args.attack_samples), range(b * self.args.batch_size, (b + 1) * self.args.batch_size), mode='clip')
        batch_images = common.torch.as_variable(self.test_images[perm], self.args.use_gpu)
        batch_classes = common.torch.as_variable(self.test_codes[perm], self.args.use_gpu)
        batch_images = batch_images.permute(0, 3, 1, 2)

        objective = self.objective_class()
        attack = self.setup_attack(self.model, batch_images, batch_classes)
        s, p, _, _, _ = attack.run(objective, False)

        batch_images = batch_images + common.torch.as_variable(p.astype(numpy.float32), self.args.use_gpu)
        output_classes = self.model(batch_images)

        e = self.loss(batch_classes, output_classes)
        perturbation_loss += e.item()
        e = self.error(batch_classes, output_classes)
        perturbation_error += e.item()

        iterations += numpy.mean(s[s >= 0]) if numpy.sum(s >= 0) > 0 else -1
        norm += numpy.mean(numpy.linalg.norm(p.reshape(p.shape[0], -1), axis=1, ord=self.norm))
        success += numpy.sum(s >= 0) / self.args.batch_size

    perturbation_loss /= num_batches
    perturbation_error /= num_batches
    success /= num_batches
    iterations /= num_batches
    norm /= num_batches
    log('[Training] %d: test %g (%g) %g (%g)' % (self.epoch, loss, error, perturbation_loss, perturbation_error))
    log('[Training] %d: test %g (%g, %g)' % (self.epoch, success, iterations, norm))

    num_batches = int(math.ceil(self.train_images.shape[0] / self.args.batch_size))
    iteration = self.epoch * num_batches
    self.test_statistics = numpy.vstack((
        self.test_statistics,
        numpy.array([[
            iteration,  # iterations
            iteration * self.args.batch_size,  # samples seen
            min(num_batches, iteration) * self.args.batch_size,  # unique samples seen
            loss,
            error,
            perturbation_loss,
            perturbation_error,
            success,
            iterations,
            norm
        ]])))