Example #1
def __init__(self):
    self.graph_def = None
    if hasattr(self, 'labels_path') and self.labels_path is not None:
        self.labels = load(self.labels_path, split=True)
    if hasattr(self, 'synsets_path') and self.synsets_path is not None:
        self.synset_ids = load(self.synsets_path, split=True)
        self.synsets = [synset_from_id(id) for id in self.synset_ids]
Example #2
def load_from_graphdef(graphdef_url):
  graph_def = load(graphdef_url)
  metadata = model_util.extract_metadata(graph_def)
  if metadata:
    return Model.load_from_metadata(graphdef_url, metadata)
  else:
    raise ValueError(
        "Model.load_from_graphdef was called on a GraphDef ({}) that does not "
        "contain Lucid's metadata node. Model.load only works for models saved "
        "via Model.save. For the graphdef you're trying to load, you will need "
        "to provide custom metadata; see Model.load_from_metadata()".format(graphdef_url))
Example #3
def get_aligned_activations(layer):
    activation_paths = [
        PATH_TEMPLATE.format(sanitize(layer.model_class.name), sanitize(layer.name), page)
        for page in range(NUMBER_OF_PAGES)
    ]
    activations = [load(path) for path in activation_paths]
    return np.vstack(activations)
Example #4
def labels(self):
    if not hasattr(self, 'labels_path') or self.labels_path is None:
        raise RuntimeError(
            "This model does not have a labels_path specified!")
    if not self._labels:
        self._labels = load(self.labels_path, split=True)
    return self._labels
Example #5
def synset_ids(self):
    if not hasattr(self, 'synsets_path') or self.synsets_path is None:
        raise RuntimeError(
            "This model does not have a synsets_path specified!")
    if not self._synset_ids:
        self._synset_ids = load(self.synsets_path, split=True)
    return self._synset_ids
Example #6
def make_caricature(image_url, saved_model_folder_url, to, *args, **kwargs):
    image = load(image_url)
    model = SerializedModel.from_directory(saved_model_folder_url)
    layers = model.layer_names
    caricatures = caricature(image,
                             model,
                             layers,
                             *args,
                             verbose=False,
                             **kwargs)

    results = {"type": "caricature"}

    save_input_url = join(to, "input.jpg")
    save(caricatures[0], save_input_url)
    results["input_image"] = save_input_url

    values_list = []
    for single_caricature, layer_name in zip(caricatures[1:],
                                             model.layer_names):
        save_caricature_url = join(to, layer_name + ".jpg")
        save(single_caricature, save_caricature_url)
        values_list.append({
            "type": "image",
            "url": save_caricature_url,
            "shape": single_caricature.shape
        })
    results["values"] = values_list

    save(results, join(to, "results.json"))

    return results
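A hedged usage sketch; the image URL is real Lucid demo data, but the model folder and output directory are placeholders:

results = make_caricature(
    "https://storage.googleapis.com/lucid-static/building-blocks/examples/dog_cat.png",
    "gs://my-bucket/saved_model",   # hypothetical serialized-model folder
    "gs://my-bucket/caricatures")   # hypothetical output location
# results["values"] holds one {"type", "url", "shape"} entry per layer,
# matching the dict built in the loop above.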
Example #7
def load_graphdef(model_url, reset_device=True):
    """Load GraphDef from a binary proto file."""
    graph_def = load(model_url)

    if reset_device:
        for n in graph_def.node:
            n.device = ""

    return graph_def
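A minimal sketch of consuming the returned GraphDef; the URL is a placeholder:

import tensorflow as tf

graph_def = load_graphdef("gs://my-bucket/model_frozen.pb")
with tf.Graph().as_default() as graph:
    # Import with an empty name scope so the original node names are kept
    tf.import_graph_def(graph_def, name="")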
Example #8
def main():
    model = models.InceptionV1()
    model.load_graphdef()
    img = load(
        "https://storage.googleapis.com/lucid-static/building-blocks/examples/dog_cat.png"
    )
    act = ActivationGrid(model)
    # Even the very naive version takes some time to run
    result = act.render_activation_grid_very_naive(img, W=48, n_steps=1024)
    result = act.render_activation_grid_less_naive(img, W=48, n_steps=1024)
Example #9
@classmethod
def from_directory(cls, model_path):
  manifest_path = path.join(model_path, 'manifest.json')
  try:
    manifest = load(manifest_path)
  except Exception as e:
    raise ValueError("Could not find manifest.json file in dir {}. Error: {}".format(model_path, e))
  if manifest.get('type', 'frozen') == 'frozen':
    return FrozenGraphModel(model_path, manifest)
  else:  # TODO: add tf.SavedModel support, etc.
    raise NotImplementedError("SerializedModel Manifest type '{}' has not been implemented!".format(manifest.get('type')))
Example #10
def callSpatialAttr(imageName, layer1, layer2):
    model = chestnet()
    model.load_graphdef()
    img = load(imageName)
    spatial_spatial_attr(model,
                         img,
                         layer1,
                         layer2,
                         hint_label_1="Atelectasis",
                         hint_label_2="Consolidation")
Example #11
def load_from_manifest(manifest_url):
  try:
    manifest = load(manifest_url)
  except Exception as e:
    raise ValueError("Could not load manifest from {}. Error: {}".format(manifest_url, e))

  if manifest.get('type', 'frozen') == 'frozen':
    manifest_folder = path.dirname(manifest_url)
    return FrozenGraphModel(manifest_folder, manifest)
  else:
    raise NotImplementedError("SerializedModel Manifest type '{}' has not been implemented!".format(manifest.get('type')))
Example #12
def load_ops(model):

    # Load the metadata info so we can get a list of the ops
    metadata = load(
        f"gs://openai-clarity/encyclopedia/graph_metadata/model={model.name}/metadata.json"
    )
    # Filter the ops list to only the ones that we are interested in
    ops = [(op_key, op['channels'])
           for op_key, op in metadata['ops'].items()
           if op['op_type'] in ('Relu', 'Conv2D') and op['rank'] == 4]
    return ops
Example #13
def get_aligned_activations(layer) -> np.ndarray:
    """Downloads 100k activations of the specified layer sampled from iterating over
    ImageNet. Activations of all layers where sampled at the same spatial positions for
    each image, allowing the calculation of correlations."""
    activation_paths = [
        PATH_TEMPLATE.format(sanitize(layer.model_name), sanitize(layer.name),
                             page) for page in range(NUMBER_OF_PAGES)
    ]
    activations = np.vstack([load(path) for path in activation_paths])
    assert np.all(np.isfinite(activations))
    return activations
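Because the rows are spatially aligned across layers, cross-layer statistics can be computed directly. A minimal sketch, where layer_a and layer_b are assumed to be layer objects like the one above:

acts_a = get_aligned_activations(layer_a)  # shape: (n_samples, channels_a)
acts_b = get_aligned_activations(layer_b)  # rows are aligned with acts_a
# correlation between one channel of each layer
corr = np.corrcoef(acts_a[:, 0], acts_b[:, 0])[0, 1]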
Example #14
def main():
    attr = ChannelAttribution(model)
    """# Channel attributions from article teaser"""
    # img = load("dog_cat.png")
    img = load(
        "https://storage.googleapis.com/lucid-static/building-blocks/examples/dog_cat.png"
    )
    attr.channel_attr(img,
                      "mixed4d",
                      "Labrador retriever",
                      "tiger cat",
                      mode="simple",
                      n_show=3)
Example #15
def load_activations(model,
                     op_name,
                     num_activations=100,
                     batch_size=4096,
                     num_activations_per_image=1):
    activations_collected_per_image = 16  # This is hardcoded from the collection process
    if num_activations_per_image > activations_collected_per_image:
        raise ValueError(
            "Attempting to use more activations than were collected per image."
        )
    activations = []
    coordinates = []
    for s in range(0,
                   math.ceil(num_activations / num_activations_per_image),
                   batch_size):
        e = s + batch_size
        # acts_per_image=16&end=1003520&model=AlexNet&sampling_strategy=random&split=train&start=999424
        loaded_activations = load(
            f"gs://openai-clarity/encyclopedia/collect_activations/acts_per_image=16&end={e}&model={model.name}&sampling_strategy=random&split=train&start={s}/{op_name}-activations.npy"
        )
        loaded_coordinates = load(
            f"gs://openai-clarity/encyclopedia/collect_activations/acts_per_image=16&end={e}&model={model.name}&sampling_strategy=random&split=train&start={s}/{op_name}-image_crops.npy"
        )

        activations.append(
            loaded_activations[:, 0:num_activations_per_image, :])
        coordinates.append(
            loaded_coordinates[:, 0:num_activations_per_image, :])
    acts = np.concatenate(activations)
    flattened_acts = acts.reshape(
        (acts.shape[0] * acts.shape[1], acts.shape[2]))

    coords = np.concatenate(coordinates)
    flattened_coords = coords.reshape(
        (coords.shape[0] * coords.shape[1], coords.shape[2]))
    return flattened_acts[:num_activations], flattened_coords[:num_activations]
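A hedged usage sketch; the op name and counts are illustrative, and model is assumed to expose a .name attribute as in the code above:

acts, coords = load_activations(model, "mixed4d", num_activations=1000)
# acts: (1000, n_channels) activation vectors
# coords: the matching image-crop coordinates, also truncated to 1000 rows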
Example #16
def render_set(n, channel):

    print("Starting", channel, n)
    obj = objectives.channel(channel, n)

    # Add this to "sharpen" the image... too much and it gets crazy
    #obj += 0.001*objectives.total_variation()

    sess = create_session()
    t_size = tf.placeholder_with_default(size_n, [])

    f_model = os.path.join(save_model_dest, channel + f"_{n}.npy")

    T = render.make_vis_T(
        model,
        obj,
        param_f=lambda: cppn(t_size),
        transforms=[],
        optimizer=optimizer,
    )
    tf.global_variables_initializer().run()
    train_vars = sess.graph.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)

    if not os.path.exists(f_model):

        for i in tqdm(range(training_steps)):
            _, loss = sess.run([
                T("vis_op"),
                T("loss"),
            ])

        # Save trained variables
        params = np.array(sess.run(train_vars), object)
        save(params, f_model)
    else:
        params = load(f_model)

    # Save final image
    feed_dict = dict(zip(train_vars, params))
    feed_dict[t_size] = image_size
    images = T("input").eval(feed_dict)
    img = images[0]
    sess.close()

    f_image = os.path.join(save_image_dest, channel + f"_{n}.jpg")
    imageio.imwrite(f_image, img)
    print(f"Saved to {f_image}")
Example #17
if __name__ == '__main__':
    # download()
    # for roots, dirs, files in os.walk('../sources'):
    #     for index, f in enumerate(files):
    #         print(f)
    #         img = load('../sources/' + f)
    #         img = img[:, :, 0:3]
    #         neuron_groups(img, str(index), "mixed5a", 2, ["Persian_cat"], filenumber=0)
    #         break
    img = load("test.jpg")
    # filename='tutu'
    # img = img[:,:,0:3]
    # neuron_groups(img, filename, "mixed5a", 2, ["guacamole"],filenumber=0)

# 4d
# tabby
# [[ 0.74442852  0.91507626 -1.47238791  0.45166963  0.42995349  1.96742225
#    1.36328828  2.02877903  2.45953035 -0.94934189  1.11171043  1.10638499
#    0.04262164  0.23066241  1.62526214  0.4787069   0.6795724   0.66656137]]
# adv_tabby
# [[ 0.74019086  0.80981618  0.52197969  0.79553312  1.85799694  0.53119451
#    1.37018788  0.39277077  1.71987665  2.58694148  0.25573224  0.85892642
#   -1.35404253  1.81914413  1.73091662  0.27351204  0.38520172 -1.72054458]]
# guacamole
Example #18
model.load_graphdef()


def compare_attr_methods(attr, img, class1, class2):
    _display_html("<h2>Linear Attribution</h2>")
    attr.channel_attr(img, "mixed4d", class1, class2, mode="simple", n_show=10)

    _display_html("<br><br><h2>Path Integrated Attribution</h2>")
    attr.channel_attr(img, "mixed4d", class1, class2, mode="path", n_show=10)

    _display_html("<br><br><h2>Stochastic Path Integrated Attribution</h2>")
    attr.channel_attr(img,
                      "mixed4d",
                      class1,
                      class2,
                      mode="path",
                      n_show=10,
                      stochastic_path=True)


attr = ChannelAttribution(model)
"""# Channel attributions from article teaser"""
img = load(
    "https://storage.googleapis.com/lucid-static/building-blocks/examples/dog_cat.png"
)
attr.channel_attr(img,
                  "mixed4d",
                  "Labrador retriever",
                  "tiger cat",
                  mode="simple",
                  n_show=3)
Example #19
def load_text_labels(labels_path):
    return load(labels_path).splitlines()
Example #20
def imresize(img, size, scale=255):
    from PIL import Image
    im = Image.fromarray((img * scale).astype(np.uint8))
    return np.array(im.resize(size, Image.BICUBIC)).astype(np.float32) / scale


tokenizer = SimpleTokenizer()

tf.reset_default_graph()
inp_text, T_text = CLIPText().load()
inp_img, T_img = CLIPImage().load()

sess = tf.Session()

captions = ["This is a dog", "This is a cat", "This is a dog and a cat"]
tokens = []
for caption in captions:
    tokens.append(tokenizer.tokenize(caption)[0])

img = imresize(
    load("https://openaipublic.blob.core.windows.net/clarity/dog_cat.jpeg"),
    [288, 288])

text_embd = sess.run(T_text("text_post/l2_normalize"), {inp_text: tokens})
img_embd = sess.run(T_img("l2_normalize"), {inp_img: [img]})

scores = (text_embd @ img_embd.T)[:, 0]

for score, caption in zip(scores, captions):
    print(caption, score)
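The printed scores are cosine similarities between the L2-normalized text and image embeddings. A possible follow-up converts them into a distribution over captions; the scale factor of 100 mirrors CLIP's learned logit scale and is an assumption here:

logits = scores * 100  # assumed temperature, not part of the original example
probs = np.exp(logits - logits.max()) / np.exp(logits - logits.max()).sum()
for p, caption in zip(probs, captions):
    print(caption, p)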
Example #21
def imagenet_synset_ids():
    return load(IMAGENET_SYNSETS_PATH, split=True)
Example #22
def neuron_groups(img_name,
                  layers,
                  model,
                  attr_classes,
                  factorization_methods,
                  flag1,
                  flag_read_attr=False,
                  iter_num=100,
                  SG_path=False,
                  labels=None,
                  pos_flag=1,
                  thres_explained_var=0.7,
                  vis_random_seed=0,
                  image_size=0,
                  debug_flag=0):
    img = load(img_name)
    # img = load("./data/doghead224.jpeg")
    # img = load("./data/cathead224.jpeg")
    # img = resize(img, (224, 224, 3), order=1, mode='constant', anti_aliasing=False).astype(np.float32)

    for attr_class in attr_classes:
        root_directory = create_root_dir(img_name, attr_class, flag1)

        if flag1 == "Shap":
            AM_list_L, logit_list, channel_attr_list, kept_channel_list_L \
              = compute_shap(img, model, attr_class, layers,
                                flag1, flag_read_attr=flag_read_attr,
                                iter_num=iter_num, labels=labels, save_directory=root_directory)
        elif flag1 == "IGSG":
            AM_list_L, logit_list, channel_attr_list, kept_channel_list_L \
              = compute_igsg(img, model, attr_class, layers,
                              flag1, flag_read_attr=flag_read_attr,
                              iter_num=iter_num, SG_path=SG_path,
                              labels=labels, save_directory=root_directory)
        else:
            continue

        print_result_from_logit(logit_list, labels)
        for i_pos_neg in range(pos_flag):
            AM_list = AM_list_L[i_pos_neg]
            kept_channel_list = kept_channel_list_L[i_pos_neg]

            if debug_flag == 1:
                debug_show_AM_plus_img(AM_list, img, model)
            if i_pos_neg < 1:
                reverse_suffix = '_pos'
            else:
                reverse_suffix = '_neg'

            for layer_index, AM in enumerate(AM_list):
                layer = layers[layer_index]
                channel_attr = channel_attr_list[layer_index]
                kept_channel_index = kept_channel_list[layer_index]

                for factorization_method in factorization_methods:
                    spatial_factors, channel_factors, n_groups = \
                      decompose_AM_get_group_num(factorization_method, AM, thres_explained_var)

                    # if debug_flag == 1:
                    #   decompose_AM_with_UMAP(AM, n_groups)

                    channel_factors_max_index, channel_shap, short_index, long_index, \
                      n_groups = map_shap_attr_to_long(channel_factors, channel_attr, kept_channel_index)

                    # AM = np.squeeze(AM)
                    spatial_factors = weight_AM2spatial_factor(
                        AM, spatial_factors, n_groups, short_index,
                        kept_channel_index, channel_attr, i_pos_neg)

                    # If the attribution is negative, channel_shap should be multiplied by -1
                    if i_pos_neg > 0:
                        channel_shap = channel_shap * -1

                    # Sorting based on sum of Shapley values
                    ns_sorted = get_sort_groups_with_shap_scores(channel_shap)
                    every_group_attr_sorted, spatial_factors, channel_shap, n_groups =\
                      sort_groups_features(ns_sorted, spatial_factors, channel_shap, n_groups)

                    no_slash_layer_name = ''.join(layer.split('/'))
                    save_directory = create_factorization_dir(
                        root_directory, factorization_method,
                        no_slash_layer_name, reverse_suffix, n_groups)
                    gen_spatial_heat_maps(85, n_groups, spatial_factors,
                                          save_directory, attr_class,
                                          factorization_method,
                                          no_slash_layer_name, img, AM, model)
                    gen_info_txt(channel_shap, n_groups, save_directory,
                                 factorization_method, attr_class,
                                 every_group_attr_sorted)

                    # Use feature attributions times activation maps as the loss function to update the visualization image
                    channel_shap = channel_shap.astype("float32")
                    obj = sum(
                        utils.dot_attr_actmaps(layer, channel_shap[i], batch=i)
                        for i in range(n_groups))
                    '''
                    For feature visualization, the library "lucid" is useful because
                    it implements many loss functions from different papers, provides
                    image processing operators, and ships several pretrained
                    TensorFlow networks.
                    '''
                    transforms = [
                        transform.pad(12),
                        transform.jitter(8),
                        transform.random_scale(
                            [n / 100. for n in range(80, 120)]),
                        transform.random_rotate(
                            list(range(-10, 10)) + list(range(-5, 5)) +
                            10 * list(range(-2, 2))),
                        transform.jitter(2)
                    ]

                    # image parameterization with shared params for aligned optimizing images
                    def interpolate_f():
                        unique = fft_image(
                            (n_groups, image_size, image_size, 3),
                            random_seed=vis_random_seed)
                        shared = [
                            lowres_tensor(
                                (n_groups, image_size, image_size, 3),
                                (1, image_size // 2, image_size // 2, 3),
                                random_seed=vis_random_seed),
                            lowres_tensor(
                                (n_groups, image_size, image_size, 3),
                                (1, image_size // 4, image_size // 4, 3),
                                random_seed=vis_random_seed),
                            lowres_tensor(
                                (n_groups, image_size, image_size, 3),
                                (1, image_size // 8, image_size // 8, 3),
                                random_seed=vis_random_seed),
                            lowres_tensor(
                                (n_groups, image_size, image_size, 3),
                                (2, image_size // 8, image_size // 8, 3),
                                random_seed=vis_random_seed),
                            lowres_tensor(
                                (n_groups, image_size, image_size, 3),
                                (1, image_size // 16, image_size // 16, 3),
                                random_seed=vis_random_seed),
                            lowres_tensor(
                                (n_groups, image_size, image_size, 3),
                                (2, image_size // 16, image_size // 16, 3),
                                random_seed=vis_random_seed)
                        ]
                        return utils.to_valid_rgb(unique + sum(shared),
                                                  decorrelate=True)

                    group_icons = render.render_vis(
                        model,
                        objective_f=obj,
                        param_f=interpolate_f,
                        transforms=transforms,
                        verbose=False,
                        relu_gradient_override=False)[-1]
                    save_imgs(group_icons, save_directory, attr_class,
                              factorization_method, no_slash_layer_name)
                    print("Layer {} and class {} -> finished".format(
                        layer, attr_class))
print("Learning Rate", LEARNING_RATE)
print('--------------------')

# ## Input your content image here.
#
# The `load` function takes a link or local filepath. Input images will be forced to squares.

# In[ ]:

# Load from a command-line argument, or fall back to a local default
if len(sys.argv) > 1:
    local_path = sys.argv[1]
else:
    local_path = "./F561f22668fee4.jpg"

CONTENT_IMAGE = load(local_path)[..., :3]  # Remove transparency channel

# Or load from a local path
#CONTENT_IMAGE = load("local_path.jpg")[..., :3]  # Remove transparency channel

show(CONTENT_IMAGE)

# ## Run!

# In[ ]:

# print(558)
lol = LucidGraph(CONTENT_IMAGE,
                 32,
                 8,
                 NUMBER_STROKES,
Example #24
    if layer_name == "localresponsenorm1":
        layer_name = "conv2d02"

    if layer_name == "conv2d0":
        pass
        return "https://storage.googleapis.com/clarity-public/colah/experiments/aprox_weights_1/%s_%s.png" % (
            layer_name, n)
    elif layer_name in ["conv2d1", "conv2d2"]:
        return "https://storage.googleapis.com/clarity-public/colah/experiments/aprox_weights_1/%s_%s.png" % (
            layer_name, n)
    else:
        return "https://openai-clarity.storage.googleapis.com/model-visualizer%2F1556758232%2FInceptionV1%2Ffeature_visualization%2Falpha%3DFalse%26layer_name%3D" + layer_name + "%26negative%3DFalse%26objective_name%3Dneuron%2Fchannel_index=" + str(
            n) + ".png"


for layer in list(layer_sizes.keys())[5:6]:  #[3:5]:
    W = W_dict[layer]
    for unit in range(layer_sizes[layer]):
        url = vis_url(layer, unit)
        img = load(url)
        D = (img.shape[0] - W) // 2
        if layer in ["mixed3a", "mixed3b"]:
            D += 5
        if layer in ["mixed4a"]:
            D += 10
        img = img[D:D + W, D:D + W]
        save(img, "public/images/neuron/%s_%s.jpg" % (layer, unit))
        print(".", end="")
        if (unit + 1) % 20 == 0: print("")
    print("\n")
Example #25
        "size2": attrs.shape[0],
        "layer2": layer2,
        "img": _image_url(img),
        "hint1": _image_url(hint1),
        "hint2": _image_url(hint2)
    }
    generate_html('spatial_attr', data)


def callSpatialAttr(imageName, layer1, layer2):
    model = chestnet()
    model.load_graphdef()
    img = load(imageName)
    spatial_spatial_attr(model,
                         img,
                         layer1,
                         layer2,
                         hint_label_1="Atelectasis",
                         hint_label_2="Consolidation")


if __name__ == "__main__":
    model = chestnet()
    model.load_graphdef()
    img = load("https://i.imgur.com/Z7xW5iy.jpg")
    spatial_spatial_attr(model,
                         img,
                         "conv4_block1_concat/concat",
                         "conv5_block1_concat/concat",
                         hint_label_1="Atelectasis",
                         hint_label_2="Consolidation")
Example #26
        spritemap_n,
        "attrsPos": [{
            "n": n,
            "v": str(float(channel_attr[n]))[:5]
        } for n in ns_pos],
        "attrsNeg": [{
            "n": n,
            "v": str(float(channel_attr[n]))[:5]
        } for n in ns_neg]
    })


"""# Channel attributions from article teaser"""

img = load(
    "https://storage.googleapis.com/lucid-static/building-blocks/examples/dog_cat.png"
)
channel_attr_simple(img,
                    "mixed4d",
                    "Labrador retriever",
                    "tiger cat",
                    n_show=3)

img = load(
    "https://storage.googleapis.com/lucid-static/building-blocks/examples/flowers.png"
)
channel_attr_simple(img, "mixed4d", "vase", "lemon", n_show=3)

img = load(
    "https://storage.googleapis.com/lucid-static/building-blocks/examples/sunglasses_tux.png"
)
Example #27
def test_image():
    return load("./tests/fixtures/dog_cat_112.jpg")
Example #28
def run(identifier):

    import os
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

    import numpy as np
    import tensorflow as tf
    import math

    from lucid.misc.io import load, show, save
    from clarity.dask.cluster import get_client

    import lucid.optvis.objectives as objectives
    import lucid.optvis.param as param
    import lucid.optvis.render as render
    import lucid.optvis.transform as transform

    from lucid.modelzoo.vision_models import InceptionV1, AlexNet
    import matplotlib.pyplot as plt
    from lucid.misc.io.writing import write_handle

    from clarity.utils.distribute import DistributeDask, DistributeMPI
    from lucid.modelzoo.nets_factory import models_map, get_model

    def spritesheet(imgs):
        k = int(np.ceil(np.sqrt(imgs.shape[0])))
        Z = np.zeros((k**2, imgs.shape[1], imgs.shape[2], imgs.shape[3]))
        Z[0:imgs.shape[0]] = imgs
        Z = np.reshape(Z, (k,k,imgs.shape[1], imgs.shape[2], imgs.shape[3]))
        Z = np.concatenate(Z, 1)
        Z = np.concatenate(Z, 1)
        return Z

    def render_atlas_tile(model,op_name,directions,icon_size=45,n_steps=127,transforms_amount=1,cossim_pow=0,L2_amount=2):      
        transforms_options = [
            [
                transform.jitter(2)
            ],
            [
                transform.pad(12, mode="constant", constant_value=.5),
                transform.jitter(8),
                transform.random_scale([1 + (i - 5) / 50. for i in range(11)]),
                transform.random_rotate(list(range(-10, 11)) + 5 * [0]),
                transform.jitter(4),
            ],
            [
                transform.pad(2, mode='constant', constant_value=.5),
                transform.jitter(4),
                transform.jitter(4),
                transform.jitter(8),
                transform.jitter(8),
                transform.jitter(8),
                transform.random_scale([0.995**n for n in range(-5,80)] + [0.998**n for n in 2*list(range(20,40))]),
                transform.random_rotate(list(range(-20,20))+list(range(-10,10))+list(range(-5,5))+5*[0]),
                transform.jitter(2),
            ],
        ]
        
        param_f = lambda: param.image(icon_size, batch=directions.shape[0])
        obj = objectives.Objective.sum(
          [objectives.direction_neuron(op_name, v, batch=n, cossim_pow=cossim_pow)
           for n,v in enumerate(directions)
          ]) - L2_amount * objectives.L2("input", 0.5) * objectives.L2("input", 0.5)
        thresholds=(n_steps//2, n_steps)

        vis_imgs = render.render_vis(model, obj, param_f, transforms=transforms_options[transforms_amount], thresholds=thresholds, verbose=False)[-1]

        return vis_imgs

    def draw_atlas(A, coordinates):

        grid_size = np.max(np.array(coordinates)) + 1
        canvas = np.ones((grid_size*A.shape[1], grid_size*A.shape[1],3))

        def slice_xy(x,y,img):
            s = A.shape[1]
            canvas[s*x:s*(x+1), s*y:s*(y+1),:] = img

        for i in range(len(coordinates)):
            slice_xy(coordinates[i][0], coordinates[i][1], A[i])

        return canvas

    def run_xy(model, layername, filename, means, coordinates, identifier):

        cluster = DistributeMPI()

        for i in range(means.shape[0]):
            cluster.submit(render_atlas_tile, model, layername, means[i:i+1])

        results = cluster.run()

        if cluster.is_master():
            r = np.array(results)[:,0,:,:,:]

            # Save spritesheet
            result1 = spritesheet(r)
            path = f"clarity-public/ggoh/Diff/{identifier}/atlas_{filename}.webp"
            save(result1, "gs://{}".format(path))
            print("https://storage.googleapis.com/{}".format(path))

            # Save filename
            print(save(r, f"gs://clarity-public/ggoh/Diff/{identifier}/r{filename}z.npy"))

            # Save coordinates
            canvas = draw_atlas(r, coordinates)
            print(save(canvas, f"gs://clarity-public/ggoh/Diff/{identifier}/rendered_atlas_{filename}z.webp"))

        cluster.comm.Barrier()

    manifest = load(f"https://storage.googleapis.com/clarity-public/ggoh/Diff/{identifier}/manifest.json")
    model_x, model_y, ops_x, ops_y = get_model(manifest['model_x']), get_model(manifest['model_y']), manifest['ops_x'], manifest['ops_y']
    model_x.load_graphdef()
    model_y.load_graphdef()

    means_x = np.nan_to_num(np.array(manifest["means_x"]))  # Replace NaNs (bins with no elements) with 0s
    means_y = np.nan_to_num(np.array(manifest["means_y"])) 
    
    run_xy(model_y, ops_y, "y", means_y, manifest["coordinates"], identifier)
    run_xy(model_x, ops_x, "x", means_x, manifest["coordinates"], identifier)
Example #29
# Control delimiters, rows, column names with read_csv (see later)
data = pd.read_csv("./data/data.csv") 
# Load data 
url_list = list(data['IMAGE LINK'])

def style_transfer_param(content_image, style_image, decorrelate=True, fft=True):
  style_transfer_input = param.image(*content_image.shape[:2], decorrelate=decorrelate, fft=fft)[0]
  content_input = content_image
  style_input = tf.random_crop(style_image, content_image.shape)
  return tf.stack([style_transfer_input, content_input, style_input])

# these constants help remember which image is at which batch dimension
TRANSFER_INDEX = 0
CONTENT_INDEX = 1
STYLE_INDEX = 2
content_image = load("style/style1.png")[..., :3]
style_image = load("style/style1.png")[..., :3]

def mean_L1(a, b):
  return tf.reduce_mean(tf.abs(a-b))

@wrap_objective
def activation_difference(layer_names, activation_loss_f=mean_L1, transform_f=None, difference_to=CONTENT_INDEX):
  def inner(T):
    # first we collect the (constant) activations of the image we're computing the difference to
    image_activations = [T(layer_name)[difference_to] for layer_name in layer_names]
    if transform_f is not None:
      image_activations = [transform_f(act) for act in image_activations]
    
    # we also get the activations of the optimized image, which will change during optimization
    optimization_activations = [T(layer)[TRANSFER_INDEX] for layer in layer_names]
Example #30
def image():
    return load("./tests/fixtures/rgbeye.png")