def reconstruction_error(self, batch_images, output_images):
    """
    Mean squared reconstruction error between target and predicted images.

    :param batch_images: target images
    :type batch_images: torch.autograd.Variable
    :param output_images: predicted images
    :type output_images: torch.autograd.Variable
    :return: error
    :rtype: torch.autograd.Variable
    """

    # Compute the residual once and square it element-wise; the mean over
    # all elements is the MSE.
    residual = batch_images - output_images
    return torch.mean(residual * residual)
def reconstruction_loss(self, batch_images, output_images):
    """
    Summed reconstruction loss; absolute (L1) or squared (L2) error
    depending on the ``absolute_error`` command-line flag.

    :param batch_images: original images
    :type batch_images: torch.autograd.Variable
    :param output_images: output images
    :type output_images: torch.autograd.Variable
    :return: error
    :rtype: torch.autograd.Variable
    """

    residual = batch_images - output_images
    if not self.args.absolute_error:
        # Squared L2 error, summed over all elements.
        return torch.sum(residual * residual)
    # Absolute (L1) error, summed over all elements.
    return torch.sum(torch.abs(residual))
def compute_appr(self):
    """
    Project perturbed and clean test images back onto the decoder's range
    by optimizing the latent codes ``theta`` per batch, then record the
    L2 distances and angles between each sample and its projection in
    ``self.distances`` / ``self.angles`` (restricted to successfully
    attacked, correctly classified samples).
    """

    assert self.test_codes is not None

    num_batches = int(math.ceil(self.perturbations.shape[0] / self.args.batch_size))
    for b in range(num_batches):
        b_start = b * self.args.batch_size
        b_end = min((b + 1) * self.args.batch_size, self.perturbations.shape[0])

        batch_classes = common.torch.as_variable(self.test_codes[b_start: b_end], self.args.use_gpu)
        # Third argument True presumably requests gradients on theta — TODO confirm against common.torch.as_variable.
        batch_theta = common.torch.as_variable(self.test_theta[b_start: b_end].astype(numpy.float32), self.args.use_gpu, True)
        batch_perturbation = common.torch.as_variable(self.perturbations[b_start: b_end].astype(numpy.float32), self.args.use_gpu)

        # Class-conditional decoders need the class code set before forward().
        if isinstance(self.model, models.SelectiveDecoder):
            self.model.set_code(batch_classes)

        # --- Projection of the adversarial perturbations -----------------
        # Optimize theta so the decoder output matches the perturbed images.
        batch_theta = torch.nn.Parameter(batch_theta)
        optimizer = torch.optim.Adam([batch_theta], lr=0.1)
        log('[Detection] %d: start' % b)
        # 100 fixed Adam steps; no convergence criterion.
        for t in range(100):
            optimizer.zero_grad()
            output_perturbation = self.model.forward(batch_theta)
            # Mean squared error between decoded output and perturbed images.
            error = torch.mean(torch.mul(output_perturbation - batch_perturbation, output_perturbation - batch_perturbation))
            error.backward()
            optimizer.step()
            log('[Detection] %d: %d = %g' % (b, t, error.item()))

        # NOTE(review): numpy.squeeze drops ALL size-1 axes — if the last
        # batch holds a single sample, the batch axis is lost too; confirm
        # batch sizes, or squeeze a specific axis instead.
        output_perturbation = numpy.squeeze(output_perturbation.cpu().detach().numpy())
        # common.numpy.concatenate presumably accepts None as the initial
        # accumulator — TODO confirm.
        self.projected_perturbations = common.numpy.concatenate(self.projected_perturbations, output_perturbation)

        # --- Projection of the clean test images -------------------------
        # Fresh theta (re-read from self.test_theta) optimized against the
        # clean images this time; note the different learning rate (0.5).
        batch_theta = common.torch.as_variable(self.test_theta[b_start: b_end].astype(numpy.float32), self.args.use_gpu, True)
        batch_images = common.torch.as_variable(self.test_images[b_start: b_end].astype(numpy.float32), self.args.use_gpu)

        batch_theta = torch.nn.Parameter(batch_theta)
        optimizer = torch.optim.Adam([batch_theta], lr=0.5)
        log('[Detection] %d: start' % b)
        for t in range(100):
            optimizer.zero_grad()
            output_images = self.model.forward(batch_theta)
            error = torch.mean(torch.mul(output_images - batch_images, output_images - batch_images))
            error.backward()
            optimizer.step()
            log('[Detection] %d: %d = %g' % (b, t, error.item()))

        output_images = numpy.squeeze(output_images.cpu().detach().numpy())
        self.projected_test_images = common.numpy.concatenate(self.projected_test_images, output_images)

    # Flatten everything to (num_samples, num_pixels) for vector geometry.
    projected_perturbations = self.projected_perturbations.reshape((self.projected_perturbations.shape[0], -1))
    projected_test_images = self.projected_test_images.reshape((self.projected_test_images.shape[0], -1))

    perturbations = self.perturbations.reshape((self.perturbations.shape[0], -1))
    test_images = self.test_images.reshape((self.test_images.shape[0], -1))

    # Only keep samples where the attack succeeded (self.success >= 0) and
    # the model was correct on the clean sample (self.accuracy).
    success = numpy.logical_and(self.success >= 0, self.accuracy)
    log('[Detection] %d valid attacked samples' % numpy.sum(success))

    # Per-sample L2 distance and angle (degrees) between perturbed images
    # and their manifold projections.
    self.distances['true'] = numpy.linalg.norm(perturbations - projected_perturbations, ord=2, axis=1)
    self.angles['true'] = numpy.rad2deg(common.numpy.angles(perturbations.T, projected_perturbations.T))

    self.distances['true'] = self.distances['true'][success]
    self.angles['true'] = self.angles['true'][success]

    # Same statistics for the clean test images.
    self.distances['test'] = numpy.linalg.norm(test_images - projected_test_images, ord=2, axis=1)
    self.angles['test'] = numpy.rad2deg(common.numpy.angles(test_images.T, projected_test_images.T))

    self.distances['test'] = self.distances['test'][success]
    self.angles['test'] = self.angles['test'][success]
def compute_true(self):
    """
    Project adversarial perturbations using the decoder conditioned on the
    TRUE font/class codes (one-hot from ``self.test_codes``), and record
    per-sample distances and angles in ``self.distances['true']`` /
    ``self.angles['true']``. The ``'test'`` entries are filled with zeros.
    """

    assert self.test_codes is not None

    num_batches = int(math.ceil(self.perturbations.shape[0] / self.args.batch_size))

    # Hyper-parameters for ADAMScheduler (project-local wrapper around Adam
    # with decayed learning rate — semantics assumed, TODO confirm).
    params = {
        'lr': 0.09,
        'lr_decay': 0.95,
        'lr_min': 0.0000001,
        'weight_decay': 0,
    }

    for b in range(num_batches):
        b_start = b * self.args.batch_size
        b_end = min((b + 1) * self.args.batch_size, self.perturbations.shape[0])

        # Columns 1 and 2 of test_codes presumably hold font and class ids
        # — TODO confirm against the dataset layout.
        batch_fonts = self.test_codes[b_start: b_end, 1]
        batch_classes = self.test_codes[b_start: b_end, 2]
        # Concatenated one-hot font + class code conditions the decoder.
        batch_code = numpy.concatenate((common.numpy.one_hot(batch_fonts, self.N_font), common.numpy.one_hot(batch_classes, self.N_class)), axis=1).astype(
            numpy.float32)
        batch_code = common.torch.as_variable(batch_code, self.args.use_gpu)

        batch_images = common.torch.as_variable(self.test_images[b_start: b_end], self.args.use_gpu)
        # HWC -> CHW for the network.
        batch_images = batch_images.permute(0, 3, 1, 2)

        batch_theta = common.torch.as_variable(self.test_theta[b_start: b_end].astype(numpy.float32), self.args.use_gpu, True)
        batch_perturbation = common.torch.as_variable(self.perturbations[b_start: b_end].astype(numpy.float32), self.args.use_gpu)

        self.model.set_code(batch_code)

        #output_images = self.model.forward(batch_theta)
        #test_error = torch.mean(torch.mul(output_images - batch_images, output_images - batch_images))
        #print(test_error.item())
        #vis.mosaic('true.png', batch_images.cpu().detach().numpy()[:, 0, :, :])
        #vis.mosaic('output.png', output_images.cpu().detach().numpy()[:, 0, :, :])
        # print(batch_images.cpu().detach().numpy()[0])
        # print(output_images.cpu().detach().numpy()[0, 0])

        #_batch_images = batch_images.cpu().detach().numpy()
        #_output_images = output_images.cpu().detach().numpy()[:, 0, :, :]
        #test_error = numpy.max(numpy.abs(_batch_images.reshape(_batch_images.shape[0], -1) - _output_images.reshape(_output_images.shape[0], -1)), axis=1)
        #print(test_error)
        #test_error = numpy.mean(numpy.multiply(_batch_images - _output_images, _batch_images - _output_images), axis=1)
        #print(test_error)

        batch_theta = torch.nn.Parameter(batch_theta)
        scheduler = ADAMScheduler([batch_theta], **params)

        log('[Detection] %d: start' % b)
        for t in range(100):
            scheduler.update(t//10, float(t)/10)
            scheduler.optimizer.zero_grad()
            output_perturbation = self.model.forward(batch_theta)
            error = torch.mean(torch.mul(output_perturbation - batch_perturbation, output_perturbation - batch_perturbation))
            test_error = torch.mean(torch.mul(output_perturbation - batch_images, output_perturbation - batch_images))
            # NOTE(review): backward/step are commented out, so this loop
            # only logs the error at the initial theta and never optimizes —
            # looks like leftover debugging state; confirm intent.
            #error.backward()
            #scheduler.optimizer.step()
            log('[Detection] %d: %d = %g, %g' % (b, t, error.item(), test_error.item()))

        # CHW -> HWC, then squeeze.
        # NOTE(review): numpy.squeeze drops ALL size-1 axes — a final batch
        # of one sample loses its batch axis; confirm batch sizes.
        output_perturbation = numpy.squeeze(numpy.transpose(output_perturbation.cpu().detach().numpy(), (0, 2, 3, 1)))
        self.projected_perturbations = common.numpy.concatenate(self.projected_perturbations, output_perturbation)

    # Flatten to (num_samples, num_pixels) for vector geometry.
    projected_perturbations = self.projected_perturbations.reshape((self.projected_perturbations.shape[0], -1))
    perturbations = self.perturbations.reshape((self.perturbations.shape[0], -1))

    # Only keep samples where the attack succeeded and the classifier was
    # correct on the clean sample.
    success = numpy.logical_and(self.success >= 0, self.accuracy)
    log('[Detection] %d valid attacked samples' % numpy.sum(success))

    self.distances['true'] = numpy.linalg.norm(perturbations - projected_perturbations, ord=2, axis=1)
    self.angles['true'] = numpy.rad2deg(common.numpy.angles(perturbations.T, projected_perturbations.T))

    self.distances['true'] = self.distances['true'][success]
    self.angles['true'] = self.angles['true'][success]

    # No clean-image projection in this variant; fill 'test' with zeros so
    # downstream consumers find the keys.
    self.distances['test'] = numpy.zeros((numpy.sum(success)))
    self.angles['test'] = numpy.zeros((numpy.sum(success)))