def save_image(img, name):
  '''
  Flip the image from BGR to RGB channel order, normalize it into a
  displayable range, and write it to disk under the given file name.
  '''
  rgb = img[:, ::-1, :, :]  # reverse the channel axis: BGR -> RGB
  scipy.misc.imsave(name, patchShow.patchShow_single(rgb, in_range=(-120, 120)))
# Example #2
def save_image(img, name):
  '''
  Save `img` to the file `name` after converting the channel order
  from BGR to RGB and normalizing for display.
  '''
  flipped = img[:, ::-1, :, :]
  normalized = patchShow.patchShow_single(flipped, in_range=(-120, 120))
  scipy.misc.imsave(name, normalized)
# Example #3
def activation_maximization(net, generator, gen_in_layer, gen_out_layer, start_code, params, 
      clip=False, debug=False, unit=None, xy=0, upper_bound=None, lower_bound=None):

  # Get the input and output sizes
  data_shape = net.blobs['data'].data.shape
  generator_output_shape = generator.blobs[gen_out_layer].data.shape

  # Calculate the difference between the input image to the net being visualized
  # and the output image from the generator
  image_size = get_shape(data_shape)
  output_size = get_shape(generator_output_shape)

  # The top left offset that we start cropping the output image to get the 227x227 image
  topleft = ((output_size[0] - image_size[0])/2, (output_size[1] - image_size[1])/2)

  print "Starting optimizing"

  x = None
  src = generator.blobs[gen_in_layer]
  
  # Make sure the layer size and initial vector size match
  assert_array_equal(src.data.shape, start_code.shape)

  # Take the starting code as the input to the generator
  src.data[:] = start_code.copy()[:]

  # Initialize an empty result
  best_xx = np.zeros(image_size)[np.newaxis]
  best_act = -sys.maxint

  # Save the activation of each image generated
  list_acts = []

  for o in params:
    
    # select layer
    layer = o['layer']

    for i in xrange(o['iter_n']):

      step_size = o['start_step_size'] + ((o['end_step_size'] - o['start_step_size']) * i) / o['iter_n']
      
      # 1. pass the code to generator to get an image x0
      generated = generator.forward(feat=src.data[:])
      x0 = generated[gen_out_layer]   # 256x256# Convert from BGR to RGB
      normalized_img = patchShow.patchShow_single(x0, in_range=(-120,120))        
      scipy.misc.imsave("x0.jpg", normalized_img)

      # Crop from 256x256 to 227x227
      cropped_x0 = x0.copy()[:,:,topleft[0]:topleft[0]+image_size[0], topleft[1]:topleft[1]+image_size[1]]

      # 2. forward pass the image x0 to net to maximize an unit k
      # 3. backprop the gradient from net to the image to get an updated image x
      grad_norm_net, x, act = make_step_net(net=net, end=layer, unit=unit, image=cropped_x0, xy=xy, step_size=step_size)
      
      # Save the solution
      # Note that we're not saving the solutions with the highest activations
      # Because there is no correlation between activation and recognizability
      best_xx = cropped_x0.copy()
      best_act = act

      # 4. Place the changes in x (227x227) back to x0 (256x256)
      updated_x0 = x0.copy()        
      updated_x0[:,:,topleft[0]:topleft[0]+image_size[0], topleft[1]:topleft[1]+image_size[1]] = x.copy()

      # 5. backprop the image to generator to get an updated code
      grad_norm_generator, updated_code = make_step_generator(net=generator, x=updated_x0, x0=x0, 
          start=gen_in_layer, end=gen_out_layer, step_size=step_size)

      # Clipping code
      if clip:
        updated_code = np.clip(updated_code, a_min=-1, a_max=1) # VAE prior is within N(0,1)

      # Clipping each neuron independently
      elif upper_bound is not None:
        updated_code = np.maximum(updated_code, lower_bound) 
        updated_code = np.minimum(updated_code, upper_bound) 

      # L2 on code to make the feature vector smaller every iteration
      if o['L2'] > 0 and o['L2'] < 1:
        updated_code[:] *= o['L2']

      # Update code
      src.data[:] = updated_code

      # Print x every 10 iterations
      if debug:
        print " > %s " % i
        name = "./debug/%s.jpg" % str(i).zfill(3)

        save_image(x.copy(), name)

        # Save acts for later
        list_acts.append( (name, act) )

      # Stop if grad is 0
      if grad_norm_generator == 0:
        print " grad_norm_generator is 0"
        break
      elif grad_norm_net == 0:
        print " grad_norm_net is 0"
        break
#       elif best_act > 0.50:
#         print "Activation reached above 0.50, breaking to avoid saturation"
#         break

  # returning the resulting image
  print " -------------------------"
  print " Result: obj act [%s] " % best_act

  if debug:
    print "Saving list of activations..."
    for p in list_acts:
      name = p[0]
      act = p[1]

      write_label(name, act)

  return best_xx , best_act