Code Example #1
File: caricature.py  Project: waterponey/lucid
def feature_inversion(img, model, layer=None, n_steps=512, cossim_pow=0.0):
  """Optimizes an image so its activations at `layer` match those of `img`."""
  with tf.Graph().as_default(), tf.Session() as sess:
    img = imgToModelSize(img, model)
    
    objective = objectives.Objective.sum([
        1.0 * dot_compare(layer, cossim_pow=cossim_pow),
        objectives.blur_input_each_step(),
    ])

    # The optimized image and the fixed reference image are stacked into one
    # batch so that dot_compare can match activations between the two.
    t_input = tf.placeholder(tf.float32, img.shape)
    param_f = param.image(img.shape[0], decorrelate=True, fft=True, alpha=False)
    param_f = tf.stack([param_f[0], t_input])

    transforms = [
      transform.pad(8, mode='constant', constant_value=.5),
      transform.jitter(8),
      transform.random_scale([0.9, 0.95, 1.05, 1.1] + [1]*4),
      transform.random_rotate(list(range(-5, 5)) + [0]*5),
      transform.jitter(2),
    ]

    T = render.make_vis_T(model, objective, param_f, transforms=transforms)
    loss, vis_op, t_image = T("loss"), T("vis_op"), T("input")

    tf.global_variables_initializer().run()
    for i in range(n_steps): _ = sess.run([vis_op], {t_input: img})

    result = t_image.eval(feed_dict={t_input: img})
    show(result[0])
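
A minimal usage sketch for the snippet above, assuming lucid's InceptionV1 model from lucid.modelzoo and an illustrative local image path (neither appears in the file itself):

from lucid.misc.io import load
from lucid.modelzoo import vision_models

model = vision_models.InceptionV1()
model.load_graphdef()                    # import weights before rendering

img = load("example.jpg")                # hypothetical path; any RGB image works
feature_inversion(img, model, layer="mixed4d", n_steps=512, cossim_pow=0.5)
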
Code Example #2
def feature_inversion(model,
                      layer,
                      example_image,
                      n_steps=512,
                      cossim_pow=1.0,
                      input_blur_coeff=0.0):
    with tf.Graph().as_default(), tf.Session() as sess:
        model.load_graphdef()
        model_name = type(model).__name__
        img_shape = model.image_shape
        img = example_image
        objective = objectives.Objective.sum([
            dot_compare(layer, cossim_pow=cossim_pow),
            input_blur_coeff * objectives.blur_input_each_step(),
        ])
        t_input = tf.placeholder(tf.float32, img_shape)
        param_f = param.image(img_shape[0])
        param_f = tf.stack([param_f[0], t_input])
        T = render.make_vis_T(model,
                              objective,
                              param_f,
                              transforms=transform.standard_transforms)
        loss, vis_op, t_image = T("loss"), T("vis_op"), T("input")
        tf.global_variables_initializer().run()
        for i in range(n_steps):
            _ = sess.run([vis_op], {t_input: img})
        return t_image.eval(feed_dict={t_input: img})[0]
Code Example #3
def test_blur_input_each_step(inceptionv1):
    objective = objectives.blur_input_each_step()
    assert_gradient_ascent(objective, inceptionv1)
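
The test above only asserts that the objective supports gradient ascent. In practice blur_input_each_step() is combined with a primary objective as a weighted regularizer; a hedged sketch (the layer name "mixed4a_pre_relu", channel 0, and the 0.5 weight are illustrative assumptions):

from lucid.modelzoo import vision_models
from lucid.optvis import objectives, render

model = vision_models.InceptionV1()
model.load_graphdef()

# Channel objective plus a small penalty that nudges the input toward a
# blurred copy of itself at each step, suppressing high-frequency noise.
obj = objectives.channel("mixed4a_pre_relu", 0) + 0.5 * objectives.blur_input_each_step()
images = render.render_vis(model, obj, thresholds=(256,))
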
Code Example #4
File: feature.py  Project: xgxofdream/BioExp
    def run(self,
            layer,
            class_,
            channel=None,
            style_template=None,
            transforms=False,
            opt_steps=1000):
        """
    layer         : layer_name to visualize
    class_        : class to consider
    style_template: template for comparision of generated activation maximization map
    transforms    : transforms required
    opt_steps     : number of optimization steps
    """

        self.layer = layer
        self.channel = channel if channel is not None else 0

        with tf.Graph().as_default() as graph, tf.Session() as sess:

            # Load the optional style template and wrap it in a constant so it
            # can serve as the gram-matrix target of the channel objective.
            gram_template = None
            if style_template is not None:
                gram_template = tf.constant(
                    np.load(style_template),  #[1:-1,:,:],
                    dtype=tf.float32)
                print('Gram Shape = {}'.format(gram_template.shape))

            obj = self._channel(self.layer + "/convolution",
                                self.channel,
                                gram=gram_template)
            obj += -self.L1 * objectives.L1(constant=.5)
            obj += self.TV * objectives.total_variation()
            obj += self.blur * objectives.blur_input_each_step()

            if transforms:
                transforms = [
                    transform.pad(2 * self.jitter),
                    transform.jitter(self.jitter),
                    transform.random_scale(
                        [self.scale**(n / 10.) for n in range(-10, 11)]),
                    transform.random_rotate(
                        range(-self.rotate, self.rotate + 1))
                ]
            else:
                transforms = []

            T = render.make_vis_T(
                self.model,
                obj,
                param_f=lambda: self.image(240,
                                           channels=self.n_channels,
                                           fft=self.decorrelate,
                                           decorrelate=self.decorrelate),
                optimizer=None,
                transforms=transforms,
                relu_gradient_override=False)
            tf.global_variables_initializer().run()

            for i in range(opt_steps):
                T("vis_op").run()

            plt.figure(figsize=(10, 10))

            texture_images = []

            for i in range(1, self.n_channels + 1):
                plt.subplot(1, self.n_channels, i)
                image = T("input").eval()[:, :, :, i - 1].reshape((240, 240))
                print("channel: ", i, image.min(), image.max())
                plt.imshow(image,
                           cmap='gray',
                           interpolation='bilinear',
                           vmin=0.,
                           vmax=1.)
                plt.xticks([])
                plt.yticks([])
                texture_images.append(image)
                # show(np.hstack(T("input").eval()))

        if class_ is not None:
            os.makedirs(os.path.join(self.savepath, class_), exist_ok=True)
            plt.savefig(os.path.join(
                self.savepath, class_,
                self.layer + '_' + str(self.channel) + '.png'),
                        bbox_inches='tight')
        else:
            plt.savefig(os.path.join(
                self.savepath, self.layer + '_' + str(self.channel) + '.png'),
                        bbox_inches='tight')
        return np.array(texture_images).transpose(1, 2, 0)