Example #1
def test_direction(cossim_pow, inceptionv1):
    mixed_4a_depth = 508
    random_direction = np.random.random(mixed_4a_depth)
    objective = objectives.direction("mixed4a_pre_relu",
                                     random_direction,
                                     cossim_pow=cossim_pow)
    assert_gradient_ascent(objective, inceptionv1)
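For reference, here is a minimal NumPy sketch of the quantity a cosine-similarity-weighted direction objective maximizes. The function name and the epsilon constant are illustrative assumptions, not lucid's actual implementation:

import numpy as np

def direction_score(acts, direction_vec, cossim_pow=0.0):
    # Dot product with the target direction, re-weighted by cosine similarity
    # raised to cossim_pow (cossim_pow=0 reduces to a plain dot product).
    dot = np.dot(acts, direction_vec)
    cossim = dot / (1e-6 + np.linalg.norm(acts) * np.linalg.norm(direction_vec))
    return dot * np.maximum(cossim, 1e-6) ** cossim_pow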
Example #2
    def render_activation_grid_very_naive(self,
                                          img,
                                          layer="mixed4d",
                                          W=42,
                                          n_steps=256):

        # Get the activations
        with tf.Graph().as_default(), tf.Session() as sess:
            t_input = tf.placeholder("float32", [None, None, None, 3])
            T = render.import_model(self.model, t_input, t_input)
            acts = T(layer).eval({t_input: img[None]})[0]
        acts_flat = acts.reshape([-1, acts.shape[-1]])

        # Render an image for each activation vector
        def param_f():
            return param.image(W, batch=acts_flat.shape[0])

        obj = objectives.Objective.sum([
            objectives.direction(layer, v, batch=n)
            for n, v in enumerate(acts_flat)
        ])
        thresholds = (n_steps // 2, n_steps)
        vis_imgs = render.render_vis(self.model,
                                     obj,
                                     param_f,
                                     thresholds=thresholds)[-1]

        # Combine the images and display the resulting grid
        vis_imgs_ = vis_imgs.reshape(list(acts.shape[:2]) + [W, W, 3])
        vis_imgs_cropped = vis_imgs_[:, :, 2:-2, 2:-2, :]
        show(np.hstack(np.hstack(vis_imgs_cropped)))
        return vis_imgs_cropped
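A hypothetical invocation of the method above, assuming it lives on a visualizer object wrapping a lucid model (`visualizer` and the image path are illustrative; `load` is lucid's image loader):

from lucid.misc.io import load

img = load("input.jpg")  # any HxWx3 float image in [0, 1]
grid = visualizer.render_activation_grid_very_naive(img, layer="mixed4d",
                                                    W=42, n_steps=256)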
Example #3
def neuron_groups(img, filename, layer, n_groups=10, attr_classes=None, filenumber=0):
    # Compute activations
    dirname = '../images/' + filename + '/'
    if attr_classes is None:
        attr_classes = []
    with tf.Graph().as_default(), tf.Session():
        t_input = tf.placeholder_with_default(img, [None, None, 3])
        T = render.import_model(model, t_input, t_input)
        acts = T(layer).eval()

    # We'll use ChannelReducer (a wrapper around scikit-learn's factorization
    # tools) to apply non-negative matrix factorization (NMF).

    nmf = ChannelReducer(n_groups, "NMF")
    spatial_factors = nmf.fit_transform(acts)[0].transpose(2, 0, 1).astype("float32")
    channel_factors = nmf._reducer.components_.astype("float32")

    # Let's organize the channels based on their horizontal position in the image

    x_peak = np.argmax(spatial_factors.max(1), 1)
    ns_sorted = np.argsort(x_peak)
    spatial_factors = spatial_factors[ns_sorted]
    channel_factors = channel_factors[ns_sorted]

    # And create a feature visualization of each group

    param_f = lambda: param.image(80, batch=n_groups)
    obj = sum(objectives.direction(layer, channel_factors[i], batch=i)
              for i in range(n_groups))
    group_icons = render.render_vis(model, obj, param_f, verbose=False)[-1]

    # We'd also like to know about attribution

    # First, let's turn each group into a vector over activations
    group_vecs = [spatial_factors[i, ..., None] * channel_factors[i]
                  for i in range(n_groups)]

    attrs = np.asarray([raw_class_group_attr(img, layer, attr_class, group_vecs)
                        for attr_class in attr_classes])

    print(attrs)
    try:
        os.mkdir(dirname)
    except Exception as e:
        print(e)
    finally:
        with open(dirname + 'attrs.txt', 'w') as f_w:
            f_w.write(str(attrs))
        # Let's render the visualization!
        for index, icon in enumerate(group_icons):
            # to_image_url returns a base64 data URL; strip the
            # "data:image/...;base64," prefix before decoding.
            imgdata = to_image_url(icon).split(',', 1)[1]
            imgdata = base64.b64decode(imgdata)
            with open(dirname + str(index) + '.png', 'wb') as f_png:
                f_png.write(imgdata)
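The examples in this section call `raw_class_group_attr` without defining it. Below is a sketch of what such a helper might look like, following gradient-times-activation attribution; the logit tensor name "softmax2_pre_activation" and the global `labels` list are assumptions about an InceptionV1-style model:

def raw_class_group_attr(img, layer, label, group_vecs):
    # Attribute each group vector to a class score via gradient * activation.
    with tf.Graph().as_default(), tf.Session():
        t_input = tf.placeholder_with_default(img, [None, None, 3])
        T = render.import_model(model, t_input, t_input)
        acts = T(layer).eval()
        if label is None:
            return [0.0 for _ in group_vecs]
        score = T("softmax2_pre_activation")[0, labels.index(label)]
        t_grad = tf.gradients([score], [T(layer)])[0]
        grad = t_grad.eval({T(layer): acts})
        return [np.sum(group_vec * grad) for group_vec in group_vecs]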
Example #4
def neuron_groups(model, img, layer, n_groups=6, attr_classes=None):
    # Compute activations
    if attr_classes is None:
        attr_classes = []

    with tf.Graph().as_default(), tf.Session():
        t_input = tf.placeholder_with_default(img, [None, None, 3])
        T = render.import_model(model, t_input, t_input)
        acts = T(layer).eval()

    # We'll use ChannelReducer (a wrapper around scikit-learn's factorization
    # tools). Note that despite the variable name, this example applies PCA
    # rather than non-negative matrix factorization (NMF).

    nmf = ChannelReducer(n_groups, "PCA")
    spatial_factors = nmf.fit_transform(acts)[0].transpose(2, 0, 1).astype("float32")
    channel_factors = nmf._reducer.components_.astype("float32")

    # Let's organize the channels based on their horizontal position in the image

    x_peak = np.argmax(spatial_factors.max(1), 1)
    ns_sorted = np.argsort(x_peak)
    spatial_factors = spatial_factors[ns_sorted]
    channel_factors = channel_factors[ns_sorted]

    # And create a feature visualization of each group

    param_f = lambda: param.image(80, batch=n_groups)
    obj = sum(objectives.direction(layer, channel_factors[i], batch=i)
              for i in range(n_groups))
    group_icons = render.render_vis(model, obj, param_f, verbose=False)[-1]

    # We'd also like to know about attribution
    #
    # First, let's turn each group into a vector over activations
    group_vecs = [spatial_factors[i, ..., None] * channel_factors[i]
                  for i in range(n_groups)]

    attrs = np.asarray([raw_class_group_attr(img, layer, attr_class, model, group_vecs)
                        for attr_class in attr_classes])

    gray_scale_groups = [skimage.color.rgb2gray(icon) for icon in group_icons]

    # Let's render the visualization!
    data = {
        "img": _image_url(img),
        "n_groups": n_groups,
        "spatial_factors": [
            _image_url(factor[..., None] / np.percentile(spatial_factors, 99) * [1, 0, 0])
            for factor in spatial_factors
        ],
        "group_icons": [_image_url(icon) for icon in gray_scale_groups],
    }


    generate_html('neuron_groups', data)
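`_image_url` above is lucid's helper for embedding an array into HTML as a data URL. For readers without lucid, here is a self-contained stand-in using PIL (the implementation is an assumption; only the name mirrors the helper):

import base64
import io
import numpy as np
from PIL import Image

def _image_url(array):
    # Encode a float array in [0, 1] as a base64 PNG data URL.
    png = Image.fromarray((np.clip(array, 0, 1) * 255).astype(np.uint8))
    buf = io.BytesIO()
    png.save(buf, format="PNG")
    return "data:image/png;base64," + base64.b64encode(buf.getvalue()).decode()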
Example #5
    def render_activation_grid_less_naive(self,
                                          img,
                                          layer="mixed4d",
                                          W=42,
                                          n_groups=6,
                                          subsample_factor=1,
                                          n_steps=256):
        # Get the activations
        with tf.Graph().as_default(), tf.Session() as sess:
            t_input = tf.placeholder("float32", [None, None, None, 3])
            T = render.import_model(self.model, t_input, t_input)
            acts = T(layer).eval({t_input: img[None]})[0]
        acts_flat = acts.reshape([-1, acts.shape[-1]])
        N = acts_flat.shape[0]

        # The trick to avoiding "decoherence" is to recognize images that are
        # for similar activation vectors and let them share parameters, so
        # that similar vectors optimize toward visually consistent images.
        if n_groups > 0:
            reducer = ChannelReducer(n_groups, "NMF")
            groups = reducer.fit_transform(acts_flat)
            groups /= groups.max(0)
        else:
            groups = np.zeros([])

        print(groups.shape)

        # The key trick to increasing memory efficiency is random sampling.
        # Even though we're visualizing lots of images, we only run a small
        # subset through the network at once. In order to do this, we'll need
        # to hold tensors in a tensorflow graph around the visualization process.
        with tf.Graph().as_default() as graph, tf.Session() as sess:
            # Using the groups, create a parameterization of images that
            # partly shares parameters between the images for similar
            # activation vectors. Each one still has a full set of unique
            # parameters, and could optimize to any image. We're just making
            # it easier to find solutions where things are the same.
            group_imgs_raw = param.fft_image([n_groups, W, W, 3])
            unique_imgs_raw = param.fft_image([N, W, W, 3])
            opt_imgs = param.to_valid_rgb(
                tf.stack([
                    0.7 * unique_imgs_raw[i] +
                    0.5 * sum(groups[i, j] * group_imgs_raw[j]
                              for j in range(n_groups))
                    for i in range(N)
                ]),
                decorrelate=True)

            # Construct a random batch to optimize this step
            batch_size = 64
            rand_inds = tf.random_uniform([batch_size], 0, N, dtype=tf.int32)
            pres_imgs = tf.gather(opt_imgs, rand_inds)
            pres_acts = tf.gather(acts_flat, rand_inds)
            obj = objectives.Objective.sum([
                objectives.direction(layer, pres_acts[n], batch=n)
                for n in range(batch_size)
            ])

            # Actually do the optimization...
            T = render.make_vis_T(self.model, obj, param_f=pres_imgs)
            tf.global_variables_initializer().run()

            for i in range(n_steps):
                T("vis_op").run()
                if (i + 1) % (n_steps // 2) == 0:
                    show(pres_imgs.eval()[::4])

            vis_imgs = opt_imgs.eval()

        # Combine the images and display the resulting grid
        print("")
        vis_imgs_ = vis_imgs.reshape(list(acts.shape[:2]) + [W, W, 3])
        vis_imgs_cropped = vis_imgs_[:, :, 2:-2, 2:-2, :]
        show(np.hstack(np.hstack(vis_imgs_cropped)))
        return vis_imgs_cropped
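As with the naive variant, a hypothetical call on the same visualizer object; note that `subsample_factor` is accepted but unused in the body above:

grid = visualizer.render_activation_grid_less_naive(img, layer="mixed4d",
                                                    W=42, n_groups=6,
                                                    n_steps=256)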
def neuron_groups(imglist, filenamelist, layer, n_groups=6, attr_classes=None):
    # Compute activations
    filename = ''.join(filenamelist)
    with open('result/' + filename + '.html', 'a') as f:
        f.write('''<!DOCTYPE html>
<html>
<head>
  <title>%s</title>
  <script src='GroupWidget_1cb0e0d.js'></script>
</head>
<body>''' % filename)
    if attr_classes is None:
        attr_classes = []
    for key, img in enumerate(imglist):
        with tf.Graph().as_default(), tf.Session():
            t_input = tf.placeholder_with_default(img, [None, None, 3])
            T = render.import_model(model, t_input, t_input)
            acts = T(layer).eval()

        # We'll use ChannelReducer (a wrapper around scikit-learn's factorization
        # tools) to apply non-negative matrix factorization (NMF).

        nmf = ChannelReducer(n_groups, "NMF")
        spatial_factors = nmf.fit_transform(acts)[0].transpose(
            2, 0, 1).astype("float32")
        channel_factors = nmf._reducer.components_.astype("float32")

        # Let's organize the channels based on their horizontal position in the image

        x_peak = np.argmax(spatial_factors.max(1), 1)
        ns_sorted = np.argsort(x_peak)
        spatial_factors = spatial_factors[ns_sorted]
        channel_factors = channel_factors[ns_sorted]

        # And create a feature visualization of each group

        param_f = lambda: param.image(80, batch=n_groups)
        obj = sum(
            objectives.direction(layer, channel_factors[i], batch=i)
            for i in range(n_groups))
        group_icons = render.render_vis(model, obj, param_f, verbose=False)[-1]

        # We'd also like to know about attribution

        # First, let's turn each group into a vector over activations
        group_vecs = [
            spatial_factors[i, ..., None] * channel_factors[i]
            for i in range(n_groups)
        ]

        attrs = np.asarray([
            raw_class_group_attr(img, layer, attr_class, group_vecs)
            for attr_class in attr_classes
        ])

        print(attrs)

        # Let's render the visualization!

        with open('result/' + filename + '.html', 'a') as f:
            # One <mainN> element per input image, each targeted by its own
            # GroupWidget instance.
            f.write('''  <main%s></main%s>
                          <script>
                            var app = new GroupWidget_1cb0e0d({
                              target: document.querySelector( 'main%s' ),''' %
                    (key, key, key))
            f.write('''data: {''')
            f.write('"img":"%s",\n' % str(_image_url(img)))
            f.write('"n_groups"' + ":" + str(n_groups) + ',\n')
            f.write('"spatial_factors"' + ":" + str([
                _image_url(factor[..., None] /
                           np.percentile(spatial_factors, 99) * [1, 0, 0])
                for factor in spatial_factors
            ]) + ',\n')
            f.write('"group_icons"' + ":" +
                    str([_image_url(icon) for icon in group_icons]) + ',\n')
            f.write('''} });''')
            f.write('''</script>''')

    with open('result/' + filename + '.html', 'a') as f:
        f.write('''</body></html>''')
    print(filename)
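A hypothetical invocation of this variant; it assumes a global lucid `model`, an existing `result/` directory, and the GroupWidget_1cb0e0d.js bundle sitting next to the generated HTML:

imgs = [load("dog_cat.png")]  # `load` is lucid's image loader
neuron_groups(imgs, ["dog_cat"], "mixed4d", n_groups=6,
              attr_classes=["Labrador retriever", "tiger cat"])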