Example #1
    def run_xy(model, layername, filename, means, coordinates, identifier):

        cluster = DistributeMPI()

        for i in range(means.shape[0]):
            cluster.submit(render_atlas_tile, model, layername, means[i:i+1])

        results = cluster.run()

        if cluster.is_master():
            r = np.array(results)[:,0,:,:,:]

            # Save spritesheet
            result1 = spritesheet(r)
            path = f"clarity-public/ggoh/Diff/{identifier}/atlas_{filename}.webp"
            save(result1, "gs://{}".format(path))
            print("https://storage.googleapis.com/{}".format(path))

            # Save filename
            print(save(r, f"gs://clarity-public/ggoh/Diff/{identifier}/r{filename}z.npy"))

            # Save coordinates
            canvas = draw_atlas(r, coordinates)
            print(save(canvas, f"gs://clarity-public/ggoh/Diff/{identifier}/rendered_atlas_{filename}z.webp"))

        cluster.comm.Barrier()
Example #2
def render_set(
    channel,
    n_iter,
    prefix,
    starting_pos=None,
    force=False,
    objective=None,
):

    f_model = os.path.join(save_model_dest, channel + f"_{prefix}.npy")
    f_image = os.path.join(save_image_dest, channel + f"_{prefix}.png")
    if os.path.exists(f_model) and not force:
        return True

    print("Starting", channel, prefix)
    obj = objective

    # Add this to "sharpen" the image... too much and it gets crazy
    #obj += 0.001*objectives.total_variation()

    sess = create_session()
    t_size = tf.placeholder_with_default(size_n, [])

    param_f = lambda: cppn(t_size)

    T = render.make_vis_T(
        model,
        obj,
        param_f=param_f,
        transforms=[],
        optimizer=optimizer,
    )
    tf.global_variables_initializer().run()

    # Assign the starting weights
    if starting_pos is not None:
        for v, x in zip(tf.trainable_variables(), starting_pos):
            sess.run(tf.assign(v, x))

    for i in tqdm(range(n_iter)):
        _, loss = sess.run([
            T("vis_op"),
            T("loss"),
        ])

    # Save trained variables
    train_vars = sess.graph.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
    params = np.array(sess.run(train_vars), object)

    save(params, f_model)

    # Save final image
    images = T("input").eval({t_size: 600})
    img = images[0]
    sess.close()

    imsave(f_image, img)
Example #3
  def save(save_url, input_name, output_names, image_shape, image_value_range):
    metadata = {
      "input_name" : input_name,
      "image_shape" : image_shape,
      "image_value_range": image_value_range,
    }

    graph_def = model_util.frozen_default_graph_def([input_name], output_names)
    model_util.infuse_metadata(graph_def, metadata)
    save(graph_def, save_url)
Example #4
  def save(save_url, input_name, output_names, image_shape, image_value_range):
    if ":" in input_name:
      raise ValueError("input_name appears to be a tensor (name contains ':') but must be an op.")
    if any([":" in name for name in output_names]):
      raise ValueError("output_namess appears to be contain tensor (name contains ':') but must be ops.")

    metadata = {
      "input_name" : input_name,
      "image_shape" : image_shape,
      "image_value_range": image_value_range,
    }

    graph_def = model_util.frozen_default_graph_def([input_name], output_names)
    model_util.infuse_metadata(graph_def, metadata)
    save(graph_def, save_url)
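For orientation, a hedged sketch of how the op-name guard in Example #4 behaves. The bucket path, op names, and shape below are made up, and this assumes save() is callable directly (frozen_default_graph_def still needs a TF default graph in scope):

# Hypothetical arguments; op names pass the guard, tensor names do not.
save("gs://some-bucket/model.pb", "input", ["output"], [224, 224, 3], (0, 1))

try:
    save("gs://some-bucket/model.pb", "input:0", ["output"], [224, 224, 3], (0, 1))
except ValueError as err:
    print(err)  # input_name appears to be a tensor (name contains ':') but must be an op.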
Example #5
def test_aligned_activation_atlas():
    model1 = AlexNet()
    layer1 = model1.layers[1]

    model2 = InceptionV1()
    layer2 = model2.layers[8]  # mixed4d

    atlasses = aligned_activation_atlas(model1,
                                        layer1,
                                        model2,
                                        layer2,
                                        number_activations=subset)
    path = "tests/recipes/results/activation_atlas/aligned_atlas-{}-of-{}.jpg".format(
        index, len(atlasses))
    for index, atlas in enumerate(atlasses):
        save(atlas, path)
Example #6
def render_set(n, channel):

    print("Starting", channel, n)
    obj = objectives.channel(channel, n)

    # Add this to "sharpen" the image... too much and it gets crazy
    #obj += 0.001*objectives.total_variation()

    sess = create_session()
    t_size = tf.placeholder_with_default(size_n, [])

    f_model = os.path.join(save_model_dest, channel + f"_{n}.npy")

    T = render.make_vis_T(
        model,
        obj,
        param_f=lambda: cppn(t_size),
        transforms=[],
        optimizer=optimizer,
    )
    tf.global_variables_initializer().run()
    train_vars = sess.graph.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)

    if not os.path.exists(f_model):

        for i in tqdm(range(training_steps)):
            _, loss = sess.run([
                T("vis_op"),
                T("loss"),
            ])

        # Save trained variables
        params = np.array(sess.run(train_vars), object)
        save(params, f_model)
    else:
        params = load(f_model)

    # Save final image
    feed_dict = dict(zip(train_vars, params))
    feed_dict[t_size] = image_size
    images = T("input").eval(feed_dict)
    img = images[0]
    sess.close()

    f_image = os.path.join(save_image_dest, channel + f"_{n}.jpg")
    imageio.imwrite(f_image, img)
    print(f"Saved to {f_image}")
Example #7
 def save_result_for_output(cls, result: object, output: object) -> None:
     """TODO: move io_adapter into lucid.misc.io and DELETE this!"""
     if result is None:
         logging.info(
             f"Task did not return a result. You can check the declared output: {output}"
         )
         return
     if isinstance(output, str):
         # if output.startswith("/"): # = is a canonical path
         if isinstance(result, str):
             with io.writing(output) as output_file:
                 output_file.write(result.encode())
             # TODO: loaders and savers? Assume serialized already for now.
         else:
             with io.writing(output) as output_file:
                 save(result, output_file)
     else:
         raise NotImplementedError
Example #8
def make_caricature(image_url, saved_model_folder_url, to, *args, **kwargs):
    image = load(image_url)
    model = SerializedModel.from_directory(saved_model_folder_url)
    layers = model.layer_names
    caricatures = caricature(image,
                             model,
                             layers,
                             *args,
                             verbose=False,
                             **kwargs)

    results = {"type": "caricature"}

    save_input_url = join(to, "input.jpg")
    save(caricatures[0], save_input_url)
    results["input_image"] = save_input_url

    values_list = []
    for single_caricature, layer_name in zip(caricatures[1:],
                                             model.layer_names):
        save_caricature_url = join(to, layer_name + ".jpg")
        save(single_caricature, save_caricature_url)
        values_list.append({
            "type": "image",
            "url": save_caricature_url,
            "shape": single_caricature.shape
        })
    results["values"] = values_list

    save(results, join(to, "results.json"))

    return results
Example #9
def export_mesh(name, mesh):
  data_to_save = {
      # earlier entries of data_to_save were clipped from this snippet
      'face': np.uint32(mesh['face'].ravel())
  }
  for key, value in data_to_save.items():
    data = value.tobytes()
    filename = '%s_%s.3d'%(name, key)
    write(data, filename)

for mesh_path in Path('article_models/').glob('*.obj'):
  mesh_name = mesh_path.stem
  print(mesh_name)
  
  tex_path = mesh_path.with_suffix('.jpg')
  if not tex_path.exists():
    tex_path = mesh_path.with_suffix('.png')
  
  mesh = meshutil.load_obj(str(mesh_path))
  mesh = meshutil.normalize_mesh(mesh)
  original_texture = prepare_image(str(tex_path), (TEXTURE_SIZE, TEXTURE_SIZE))
  
  export_mesh(mesh_name, mesh)
  lucid_io.save(original_texture, mesh_name+'_tex.jpg', quality=90)

  for style_name, url in styles:
    if style_name[0] == '#':
      continue
    style_img = prepare_image(url)
    reset(style_img, original_texture)
    run(mesh, step_n=800)
    texture = t_texture.eval()
    filename = '%s_tex_%s.jpg'%(mesh_name, style_name)
    lucid_io.save(texture, filename, quality=90)
Example #10
    if layer_name == "localresponsenorm1":
        layer_name = "conv2d02"

    if layer_name == "conv2d0":
        pass
        return "https://storage.googleapis.com/clarity-public/colah/experiments/aprox_weights_1/%s_%s.png" % (
            layer_name, n)
    elif layer_name in ["conv2d1", "conv2d2"]:
        return "https://storage.googleapis.com/clarity-public/colah/experiments/aprox_weights_1/%s_%s.png" % (
            layer_name, n)
    else:
        return "https://openai-clarity.storage.googleapis.com/model-visualizer%2F1556758232%2FInceptionV1%2Ffeature_visualization%2Falpha%3DFalse%26layer_name%3D" + layer_name + "%26negative%3DFalse%26objective_name%3Dneuron%2Fchannel_index=" + str(
            n) + ".png"


for layer in list(layer_sizes.keys())[5:6]:  #[3:5]:
    W = W_dict[layer]
    for unit in range(layer_sizes[layer]):
        url = vis_url(layer, unit)
        img = load(url)
        D = (img.shape[0] - W) // 2
        if layer in ["mixed3a", "mixed3b"]:
            D += 5
        if layer in ["mixed4a"]:
            D += 10
        img = img[D:D + W, D:D + W]
        save(img, "public/images/neuron/%s_%s.jpg" % (layer, unit))
        print(".", end="")
        if (unit + 1) % 20 == 0: print("")
    print("\n")
Example #11
def test_activation_atlas():
    model = AlexNet()
    layer = model.layers[1]
    atlas = activation_atlas(model, layer, number_activations=subset)
    save(atlas, "tests/recipes/results/activation_atlas/atlas.jpg")
Example #12
for facet in facets:
    layernames = [
        n.name for n in model.graph_def.node
        if ("image_block_3" in n.name) and ("Relu_2" in n.name)
    ][::2]

    def loadnpy(url):
        import blobfile
        from io import BytesIO
        # the context manager guarantees the blob handle is closed
        with blobfile.BlobFile(url, "rb") as fp:
            return np.load(BytesIO(fp.read()))

    style_attrs = [
        loadnpy(
            f"https://openaipublic.blob.core.windows.net/clip/facets/{model.name}/{layername}/{facet}_spatial.npy"
        ) for layername in layernames
    ]
    for l2_weight in [10]:
        img = render_facet(model,
                           d,
                           layernames,
                           style_attrs,
                           l2_weight=l2_weight,
                           strength=(0.1, 5.0),
                           alpha=False,
                           resolution=256)
        save(img[0][-1], f"/root/{facet}.png")
    download = "wget --output-document hello.jpg '"+url1+"'"
    os.system(download)
    #os.system('wget --output-document auto.jpg {}'.format(url1))
    print(i)
    with open('hello.jpg', 'r+b') as f:
      with Image.open(f) as image:
          cover = resizeimage.resize_cover(image, [200, 190])
          cover.save('images/auto'+str(i)+'.jpg', image.format)
    # image optimisation
    ImageOps.equalize(Image.open("images/auto"+str(i)+".jpg")).save("images/auto"+str(i)+".jpg")
    os.system('rm hello.jpg')
    content_image = load('images/auto'+str(i)+'.jpg')[...,:3]
    style_image = load("style/style2.png")[..., :3] # choose a style

    param_f = lambda: style_transfer_param(content_image, style_image)
    content_obj = 100 * activation_difference(content_layers, difference_to=CONTENT_INDEX)
    content_obj.description = "Content Loss"

    style_obj = activation_difference(style_layers, transform_f=gram_matrix, difference_to=STYLE_INDEX)
    style_obj.description = "Style Loss"

    objective = - content_obj - style_obj

    vis = render.render_vis(model, objective, param_f=param_f, thresholds=[512], verbose=False, print_objectives=[content_obj, style_obj])[-1]
    savepath = "new/" + data['Keywords'][i] + data['Year'][i] +"_num_"+ str(i) + ".jpg"
    print("s")
    save(vis[0], savepath)
    print("end")
  except:
    continue
Example #14
def generate(
    *,
    output_dir,
    model_bytes,
    observations,
    observations_full=None,
    trajectories,
    policy_logits_name,
    value_function_name,
    env_name=None,
    numpy_precision=6,
    inline_js=True,
    inline_large_json=None,
    batch_size=512,
    action_combos=None,
    action_group_fns=[
        lambda combo: "RIGHT" in combo,
        lambda combo: "LEFT" in combo,
        lambda combo: "UP" in combo,
        lambda combo: "DOWN" in combo,
        lambda combo: "RIGHT" not in combo and "LEFT" not in combo and "UP"
        not in combo and "DOWN" not in combo,
    ],
    layer_kwargs={},
    input_layer_include=False,
    input_layer_name="input",
    gae_gamma=None,
    gae_lambda=None,
    trajectory_bookmarks=16,
    nmf_features=8,
    nmf_attr_opts=None,
    vis_subdiv_mults=[0.25, 0.5, 1, 2],
    vis_subdiv_mult_default=1,
    vis_expand_mults=[1, 2, 4, 8],
    vis_expand_mult_default=4,
    vis_thumbnail_num_mult=4,
    vis_thumbnail_expand_mult=4,
    scrub_range=(42 / 64, 44 / 64),
    attr_integrate_steps=10,
    attr_max_paths=None,
    attr_policy=False,
    attr_single_channels=True,
    observations_subdir="observations/",
    trajectories_subdir="trajectories/",
    trajectories_scrub_subdir="trajectories_scrub/",
    features_subdir="features/",
    thumbnails_subdir="thumbnails/",
    attribution_subdir="attribution/",
    attribution_scrub_subdir="attribution_scrub/",
    features_grids_subdir="features_grids/",
    attribution_totals_subdir="attribution_totals/",
    video_height="16em",
    video_width="16em",
    video_speed=12,
    policy_display_height="2em",
    policy_display_width="40em",
    navigator_width="24em",
    scrubber_height="4em",
    scrubber_width="48em",
    scrubber_visible_duration=256,
    legend_item_height="6em",
    legend_item_width="6em",
    feature_viewer_height="40em",
    feature_viewer_width="40em",
    attribution_weight=0.9,
    graph_colors={
        "v": "green",
        "action": "red",
        "action_group": "orange",
        "advantage": "blue",
    },
    trajectory_color="blue",
):
    from mpi4py import MPI

    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()

    model = get_model(model_bytes)
    if rank == 0:
        js_source_path = get_compiled_js()

    if env_name is None:
        env_name = "unknown"
    if inline_large_json is None:
        inline_large_json = "://" not in output_dir
    layer_kwargs.setdefault("name_contains_one_of", None)
    layer_kwargs.setdefault("op_is_one_of", ["relu"])
    layer_kwargs.setdefault("bottleneck_only", True)
    layer_kwargs.setdefault("discard_first_n", 0)
    if observations_full is None:
        observations_full = observations
    if "observations_full" not in trajectories:
        trajectories["observations_full"] = trajectories["observations"]
    if np.issubdtype(observations.dtype, np.integer):
        observations = observations / np.float32(255)
    if np.issubdtype(observations_full.dtype, np.integer):
        observations_full = observations_full / np.float32(255)
    if np.issubdtype(trajectories["observations"].dtype, np.integer):
        trajectories[
            "observations"] = trajectories["observations"] / np.float32(255)
    if np.issubdtype(trajectories["observations_full"].dtype, np.integer):
        trajectories["observations_full"] = trajectories[
            "observations_full"] / np.float32(255)
    if action_combos is None:
        num_actions = get_shape(model, policy_logits_name)[-1]
        action_combos = list(map(lambda x: (str(x), ), range(num_actions)))
        if env_name == "coinrun_old":
            action_combos = [
                (),
                ("RIGHT", ),
                ("LEFT", ),
                ("UP", ),
                ("RIGHT", "UP"),
                ("LEFT", "UP"),
                ("DOWN", ),
                ("A", ),
                ("B", ),
            ][:num_actions]
    if gae_gamma is None:
        gae_gamma = 0.999
    if gae_lambda is None:
        gae_lambda = 0.95

    layer_names = get_layer_names(model,
                                  [policy_logits_name, value_function_name],
                                  **layer_kwargs)
    if not layer_names:
        raise RuntimeError("No appropriate layers found. "
                           "Please adapt layer_kwargs to your architecture")
    squash = lambda s: s.replace("/", "").replace("_", "")
    if len(set([squash(layer_key)
                for layer_key in layer_names.keys()])) < len(layer_names):
        raise RuntimeError("Error squashing abbreviated layer names. "
                           "Different substitutions must be used")
    mpi_enumerate = lambda l: (lambda indices: list(enumerate(l))[indices[
        rank]:indices[rank + 1]])(np.linspace(0, len(l),
                                              comm.Get_size() + 1).astype(int))
    save_image = lambda image, path: save(
        image, os.path.join(output_dir, path), domain=(0, 1))
    save_images = lambda images, path: save_image(
        concatenate_horizontally(images), path)
    json_preloaded = {}
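    # save_json: when inline_large_json is set, keep the data in memory so it
    # can be embedded directly into the generated HTML instead of saved out.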
    save_json = lambda data, path: (json_preloaded.update(
        {path: data}) if inline_large_json else save(
            data, os.path.join(output_dir, path), indent=None))
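    # get_scrub_slice: the horizontal pixel range of a strip covered by
    # scrub_range, clamped to be at least one pixel wide.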
    get_scrub_slice = lambda width: slice(
        int(np.round(scrub_range[0] * width)),
        int(
            np.maximum(np.round(scrub_range[1] * width),
                       np.round(scrub_range[0] * width) + 1)),
    )
    action_groups = [[
        action for action, combo in enumerate(action_combos) if group_fn(combo)
    ] for group_fn in action_group_fns]
    action_groups = list(
        filter(lambda action_group: len(action_group) > 1, action_groups))

    for index, observation in mpi_enumerate(observations_full):
        observation_path = os.path.join(observations_subdir, f"{index}.png")
        save_image(observation, observation_path)
    for index, trajectory_observations in mpi_enumerate(
            trajectories["observations_full"]):
        trajectory_path = os.path.join(trajectories_subdir, f"{index}.png")
        save_images(trajectory_observations, trajectory_path)
        scrub_slice = get_scrub_slice(trajectory_observations.shape[2])
        scrub = trajectory_observations[:, :, scrub_slice, :]
        scrub_path = os.path.join(trajectories_scrub_subdir, f"{index}.png")
        save_images(scrub, scrub_path)

    trajectories["policy_logits"] = []
    trajectories["values"] = []
    for trajectory_observations in trajectories["observations"]:
        trajectories["policy_logits"].append(
            batched_get(
                trajectory_observations,
                batch_size,
                lambda minibatch: get_acts(model, policy_logits_name, minibatch
                                           ),
            ))
        trajectories["values"].append(
            batched_get(
                trajectory_observations,
                batch_size,
                lambda minibatch: get_acts(model, value_function_name,
                                           minibatch),
            ))
    trajectories["policy_logits"] = np.array(trajectories["policy_logits"])
    trajectories["values"] = np.array(trajectories["values"])
    trajectories["advantages"] = compute_gae(trajectories,
                                             gae_gamma=gae_gamma,
                                             gae_lambda=gae_lambda)
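    # If "dones" is absent, derive it by shifting "firsts" left one step:
    # a done at time t corresponds to a first at time t + 1.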
    if "dones" not in trajectories:
        trajectories["dones"] = np.concatenate(
            [
                trajectories["firsts"][:, 1:],
                np.zeros_like(trajectories["firsts"][:, :1]),
            ],
            axis=-1,
        )

    bookmarks = {
        "high": get_bookmarks(trajectories, sign=1, num=trajectory_bookmarks),
        "low": get_bookmarks(trajectories, sign=-1, num=trajectory_bookmarks),
    }

    nmf_kwargs = {"attr_layer_name": value_function_name}
    if nmf_attr_opts is not None:
        nmf_kwargs["attr_opts"] = nmf_attr_opts
    nmfs = {
        layer_key: LayerNMF(
            model,
            layer_name,
            observations,
            obses_full=observations_full,
            features=nmf_features,
            **nmf_kwargs,
        )
        for layer_key, layer_name in layer_names.items()
    }

    features = []
    attributions = []
    attribution_totals = []

    for layer_key, layer_name in layer_names.items():
        nmf = nmfs[layer_key]

        if rank == 0:
            thumbnails = []
            for number in range(nmf.features):
                thumbnail = nmf.vis_dataset_thumbnail(
                    number,
                    num_mult=vis_thumbnail_num_mult,
                    expand_mult=vis_thumbnail_expand_mult,
                )[0]
                thumbnail = rescale_opacity(thumbnail,
                                            max_scale=1,
                                            keep_zeros=True)
                thumbnails.append(thumbnail)
            thumbnails_path = os.path.join(thumbnails_subdir,
                                           f"{squash(layer_key)}.png")
            save_images(thumbnails, thumbnails_path)

        for _, number in mpi_enumerate(range(nmf.features)):
            feature = {
                "layer": layer_key,
                "number": number,
                "images": [],
                "overlay_grids": [],
                "metadata": {
                    "subdiv_mult": [],
                    "expand_mult": []
                },
            }
            for subdiv_mult in vis_subdiv_mults:
                for expand_mult in vis_expand_mults:
                    image, overlay_grid = nmf.vis_dataset(
                        number,
                        subdiv_mult=subdiv_mult,
                        expand_mult=expand_mult)
                    image = rescale_opacity(image)
                    filename_root = (f"{squash(layer_key)}_"
                                     f"feature{number}_"
                                     f"{number_to_string(subdiv_mult)}_"
                                     f"{number_to_string(expand_mult)}")
                    image_filename = filename_root + ".png"
                    overlay_grid_filename = filename_root + ".json"
                    image_path = os.path.join(features_subdir, image_filename)
                    overlay_grid_path = os.path.join(features_grids_subdir,
                                                     overlay_grid_filename)
                    save_image(image, image_path)
                    save_json(overlay_grid, overlay_grid_path)
                    feature["images"].append(image_filename)
                    feature["overlay_grids"].append(overlay_grid_filename)
                    feature["metadata"]["subdiv_mult"].append(subdiv_mult)
                    feature["metadata"]["expand_mult"].append(expand_mult)
            features.append(feature)

    for layer_key, layer_name in ([
        (input_layer_name, None)
    ] if input_layer_include else []) + list(layer_names.items()):
        if layer_name is None:
            nmf = None
        else:
            nmf = nmfs[layer_key]

        for index, trajectory_observations in mpi_enumerate(
                trajectories["observations"]):
            attribution = {
                "layer": layer_key,
                "trajectory": index,
                "images": [],
                "metadata": {
                    "type": [],
                    "data": [],
                    "direction": [],
                    "channel": []
                },
            }
            if layer_name is not None:
                totals = {
                    "layer": layer_key,
                    "trajectory": index,
                    "channels": [],
                    "residuals": [],
                    "metadata": {
                        "type": [],
                        "data": []
                    },
                }

            def get_attr_minibatch(minibatch,
                                   output_name,
                                   *,
                                   score_fn=default_score_fn):
                if layer_name is None:
                    return get_grad(model,
                                    output_name,
                                    minibatch,
                                    score_fn=score_fn)
                elif attr_max_paths is None:
                    return get_attr(
                        model,
                        output_name,
                        layer_name,
                        minibatch,
                        score_fn=score_fn,
                        integrate_steps=attr_integrate_steps,
                    )
                else:
                    return get_multi_path_attr(
                        model,
                        output_name,
                        layer_name,
                        minibatch,
                        nmf,
                        score_fn=score_fn,
                        integrate_steps=attr_integrate_steps,
                        max_paths=attr_max_paths,
                    )

            def get_attr_batched(output_name, *, score_fn=default_score_fn):
                return batched_get(
                    trajectory_observations,
                    batch_size,
                    lambda minibatch: get_attr_minibatch(
                        minibatch, output_name, score_fn=score_fn),
                )

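            # transform_attr: project attributions onto the signed NMF feature
            # directions; whatever the basis cannot reconstruct is collapsed
            # into a single residual channel.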
            def transform_attr(attr):
                if layer_name is None:
                    return attr, None
                else:
                    attr_trans = nmf.transform(np.maximum(
                        attr, 0)) - nmf.transform(np.maximum(-attr, 0))
                    attr_res = (
                        attr -
                        (nmf.inverse_transform(np.maximum(attr_trans, 0)) -
                         nmf.inverse_transform(np.maximum(-attr_trans, 0)))
                    ).sum(-1, keepdims=True)
                    nmf_norms = nmf.channel_dirs.sum(-1)
                    return attr_trans * nmf_norms[None, None, None], attr_res

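            # save_attr: write attribution heatmaps (abs/pos/neg x channel
            # variants) plus per-channel totals, recording filenames and
            # metadata for the interface.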
            def save_attr(attr, attr_res, *, type_, data):
                if attr_res is None:
                    attr_res = np.zeros_like(attr).sum(-1, keepdims=True)
                filename_root = f"{squash(layer_key)}_{index}_{type_}"
                if data is not None:
                    filename_root = f"{filename_root}_{data}"
                if layer_name is not None:
                    channels_filename = f"{filename_root}_channels.json"
                    residuals_filename = f"{filename_root}_residuals.json"
                    channels_path = os.path.join(attribution_totals_subdir,
                                                 channels_filename)
                    residuals_path = os.path.join(attribution_totals_subdir,
                                                  residuals_filename)
                    save_json(attr.sum(-2).sum(-2), channels_path)
                    save_json(attr_res[..., 0].sum(-1).sum(-1), residuals_path)
                    totals["channels"].append(channels_filename)
                    totals["residuals"].append(residuals_filename)
                    totals["metadata"]["type"].append(type_)
                    totals["metadata"]["data"].append(data)
                attr_scale = np.median(attr.max(axis=(-3, -2, -1)))
                if attr_scale == 0:
                    attr_scale = attr.max()
                if attr_scale == 0:
                    attr_scale = 1
                attr_scaled = attr / attr_scale
                attr_res_scaled = attr_res / attr_scale
                channels = ["prin", "all"]
                if attr_single_channels and layer_name is not None:
                    channels += list(range(nmf.features)) + ["res"]
                for direction in ["abs", "pos", "neg"]:
                    if direction == "abs":
                        attr = np.abs(attr_scaled)
                        attr_res = np.abs(attr_res_scaled)
                    elif direction == "pos":
                        attr = np.maximum(attr_scaled, 0)
                        attr_res = np.maximum(attr_res_scaled, 0)
                    elif direction == "neg":
                        attr = np.maximum(-attr_scaled, 0)
                        attr_res = np.maximum(-attr_res_scaled, 0)
                    for channel in channels:
                        if isinstance(channel, int):
                            attr_single = attr.copy()
                            attr_single[..., :channel] = 0
                            attr_single[..., (channel + 1):] = 0
                            images = channels_to_rgb(attr_single)
                        elif channel == "res":
                            images = attr_res.repeat(3, axis=-1)
                        else:
                            images = channels_to_rgb(attr)
                            if channel == "all":
                                images += attr_res.repeat(3, axis=-1)
                        images = brightness_to_opacity(
                            conv2d(images, filter_=norm_filter(15)))
                        suffix = f"{direction}_{channel}"
                        images_filename = f"{filename_root}_{suffix}.png"
                        images_path = os.path.join(attribution_subdir,
                                                   images_filename)
                        save_images(images, images_path)
                        scrub = images[:, :,
                                       get_scrub_slice(images.shape[2]), :]
                        scrub_path = os.path.join(attribution_scrub_subdir,
                                                  images_filename)
                        save_images(scrub, scrub_path)
                        attribution["images"].append(images_filename)
                        attribution["metadata"]["type"].append(type_)
                        attribution["metadata"]["data"].append(data)
                        attribution["metadata"]["direction"].append(direction)
                        attribution["metadata"]["channel"].append(channel)

            attr_v = get_attr_batched(value_function_name)
            attr_v_trans, attr_v_res = transform_attr(attr_v)
            save_attr(attr_v_trans, attr_v_res, type_="v", data=None)
            if attr_policy:
                attr_actions = np.array([
                    get_attr_batched(
                        policy_logits_name,
                        score_fn=lambda t: t[..., action],
                    ) for action in range(len(action_combos))
                ])
                # attr_pi = attr_actions.sum(axis=-1).transpose(
                #     (1, 2, 3, 0))
                # attr_pi = np.concatenate([
                #     attr_pi[..., group].sum(axis=-1, keepdims=True)
                #     for group in attr_action_groups
                # ],
                #                          axis=-1)
                # save_attr(attr_pi, None, type_='pi', data=None)
                for action, attr in enumerate(attr_actions):
                    attr_trans, attr_res = transform_attr(attr)
                    save_attr(attr_trans,
                              attr_res,
                              type_="action",
                              data=action)
                for action_group, actions in enumerate(action_groups):
                    attr = attr_actions[actions].sum(axis=0)
                    attr_trans, attr_res = transform_attr(attr)
                    save_attr(attr_trans,
                              attr_res,
                              type_="action_group",
                              data=action_group)
            attributions.append(attribution)
            if layer_name is not None:
                attribution_totals.append(totals)

    features = comm.gather(features, root=0)
    attributions = comm.gather(attributions, root=0)
    attribution_totals = comm.gather(attribution_totals, root=0)

    if rank == 0:
        features = [feature for l in features for feature in l]
        attributions = [attribution for l in attributions for attribution in l]
        attribution_totals = [
            totals for l in attribution_totals for totals in l
        ]
        layer_keys = ([input_layer_name] if input_layer_include else
                      []) + list(layer_names.keys())
        action_colors = get_html_colors(
            len(action_combos),
            grayscale=True,
            mix_with=np.array([0.75, 0.75, 0.75]),
            mix_weight=0.25,
        )
        props = {
            "input_layer": input_layer_name,
            "layers": layer_keys,
            "features": features,
            "attributions": attributions,
            "attribution_policy": attr_policy,
            "attribution_single_channels": attr_single_channels,
            "attribution_totals": attribution_totals,
            "colors": {
                "features": get_html_colors(nmf_features),
                "actions": action_colors,
                "graphs": graph_colors,
                "trajectory": trajectory_color,
            },
            "action_combos": action_combos,
            "action_groups": action_groups,
            "trajectories": {
                "actions": trajectories["actions"],
                "rewards": trajectories["rewards"],
                "dones": trajectories["dones"],
                "policy_logits": trajectories["policy_logits"],
                "values": trajectories["values"],
                "advantages": trajectories["advantages"],
            },
            "bookmarks": bookmarks,
            "vis_defaults": {
                "subdiv_mult": vis_subdiv_mult_default,
                "expand_mult": vis_expand_mult_default,
            },
            "subdirs": {
                "observations": observations_subdir,
                "trajectories": trajectories_subdir,
                "trajectories_scrub": trajectories_scrub_subdir,
                "features": features_subdir,
                "thumbnails": thumbnails_subdir,
                "attribution": attribution_subdir,
                "attribution_scrub": attribution_scrub_subdir,
                "features_grids": features_grids_subdir,
                "attribution_totals": attribution_totals_subdir,
            },
            "formatting": {
                "video_height": video_height,
                "video_width": video_width,
                "video_speed": video_speed,
                "policy_display_height": policy_display_height,
                "policy_display_width": policy_display_width,
                "navigator_width": navigator_width,
                "scrubber_height": scrubber_height,
                "scrubber_width": scrubber_width,
                "scrubber_visible_duration": scrubber_visible_duration,
                "legend_item_height": legend_item_height,
                "legend_item_width": legend_item_width,
                "feature_viewer_height": feature_viewer_height,
                "feature_viewer_width": feature_viewer_width,
                "attribution_weight": attribution_weight,
            },
            "json_preloaded": json_preloaded,
        }

        if inline_js:
            js_path = js_source_path
        else:
            with open(js_source_path, "r") as fp:
                js_code = fp.read()
            js_path = os.path.join(output_dir, "interface.js")
            with write_handle(js_path, "w") as fp:
                fp.write(js_code)
        html_path = os.path.join(output_dir, "interface.html")
        compile_html(
            js_path,
            html_path=html_path,
            props=props,
            precision=numpy_precision,
            inline_js=inline_js,
            svelte_to_js=False,
        )
        if output_dir.startswith("gs://"):
            if not inline_js:
                subprocess.run([
                    "gsutil",
                    "setmeta",
                    "-h",
                    "Content-Type: text/javascript",
                    js_path,
                ])
            subprocess.run([
                "gsutil", "setmeta", "-h", "Content-Type: text/html", html_path
            ])
        elif output_dir.startswith("https://"):
            output_dir_parsed = urllib.parse.urlparse(output_dir)
            az_account, az_hostname = output_dir_parsed.netloc.split(".", 1)
            if az_hostname == "blob.core.windows.net":
                az_container = removeprefix(output_dir_parsed.path,
                                            "/").split("/")[0]
                az_prefix = f"https://{az_account}.{az_hostname}/{az_container}/"
                if not inline_js:
                    js_az_name = removeprefix(js_path, az_prefix)
                    subprocess.run([
                        "az",
                        "storage",
                        "blob",
                        "update",
                        "--container-name",
                        az_container,
                        "--name",
                        js_az_name,
                        "--account-name",
                        az_account,
                        "--content-type",
                        "application/javascript",
                    ])
                html_az_name = removeprefix(html_path, az_prefix)
                subprocess.run([
                    "az",
                    "storage",
                    "blob",
                    "update",
                    "--container-name",
                    az_container,
                    "--name",
                    html_az_name,
                    "--account-name",
                    az_account,
                    "--content-type",
                    "text/html",
                ])
Example #15
def run(model, ops):

    import numpy as np
    import tensorflow as tf
    import math
    import urllib.parse
    import sklearn

    from umap import UMAP

    from lucid.misc.io import load, show, save

    from clarity.dask.cluster import get_client

    import lucid.optvis.objectives as objectives
    import lucid.optvis.param as param
    import lucid.optvis.render as render
    import lucid.optvis.transform as transform

    from lucid.modelzoo.vision_models import InceptionV1, AlexNet
    import matplotlib.pyplot as plt
    from lucid.misc.io.writing import write_handle

    from clarity.utils.distribute import DistributeDask, DistributeMPI
    from lucid.modelzoo.nets_factory import models_map, get_model

    # Produced by the "collect_activations" notebook
    def load_activations(model,
                         op_name,
                         num_activations=100,
                         batch_size=4096,
                         num_activations_per_image=1):
        activations_collected_per_image = 16  # This is hardcoded from the collection process
        if num_activations_per_image > activations_collected_per_image:
            raise ValueError(
                "Attempting to use more activations than were collected per image."
            )
        activations = []
        coordinates = []
        for s in range(0,
                       math.ceil(num_activations / num_activations_per_image),
                       batch_size):
            e = s + batch_size
            # acts_per_image=16&end=1003520&model=AlexNet&sampling_strategy=random&split=train&start=999424
            loaded_activations = load(
                f"gs://openai-clarity/encyclopedia/collect_activations/acts_per_image=16&end={e}&model={model.name}&sampling_strategy=random&split=train&start={s}/{op_name}-activations.npy"
            )
            loaded_coordinates = load(
                f"gs://openai-clarity/encyclopedia/collect_activations/acts_per_image=16&end={e}&model={model.name}&sampling_strategy=random&split=train&start={s}/{op_name}-image_crops.npy"
            )

            activations.append(
                loaded_activations[:, 0:num_activations_per_image, :])
            coordinates.append(
                loaded_coordinates[:, 0:num_activations_per_image, :])
        acts = np.concatenate(activations)
        flattened_acts = acts.reshape(
            (acts.shape[0] * acts.shape[1], acts.shape[2]))

        coords = np.concatenate(coordinates)
        flattened_coords = coords.reshape(
            (coords.shape[0] * coords.shape[1], coords.shape[2]))
        return flattened_acts[:num_activations], flattened_coords[:num_activations]

    def load_ops(model):

        # Load the metadata info so we can get a list of the ops
        metadata = load(
            f"gs://openai-clarity/encyclopedia/graph_metadata/model={model.name}/metadata.json"
        )
        # Filter the ops list to only the ones that we are interested in
        ops = [(op_key, op['channels'])
               for op_key, op in metadata['ops'].items()
               if op['op_type'] in ('Relu', 'Conv2D') and op['rank'] == 4]
        return ops

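    # Bin the 2D layout into a grid_size x grid_size grid. For each cell with
    # more than `threshold` points, average activations separately over the
    # first half of the rows (model X, channels [:partition]) and the second
    # half (model Y, channels [partition:]).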
    def bin_laid_out_activations(layout,
                                 activations,
                                 partition,
                                 grid_size,
                                 threshold=5):

        n = activations.shape[0]

        assert layout.shape[0] == activations.shape[0]
        assert n % 2 == 0

        # calculate which grid cells each activation's layout position falls into
        # first bin stays empty because nothing should be < 0, so we add an extra bin
        bins = np.linspace(0, 1, num=grid_size + 1)
        bins[-1] = np.inf  # last bin should include all higher values
        indices = np.digitize(
            layout, bins) - 1  # subtract 1 to account for empty first bin

        means_x, means_y, coordinates, counts_x, counts_y = [], [], [], [], []

        grid_coordinates = np.indices(
            (grid_size, grid_size)).transpose().reshape(-1, 2)
        for xy_coordinates in grid_coordinates:
            mask = np.equal(xy_coordinates, indices).all(axis=1)
            count_x = np.count_nonzero(mask[0:n // 2])
            count_y = np.count_nonzero(mask[n // 2:])
            if (count_x + count_y) > threshold:
                counts_x.append(count_x)
                counts_y.append(count_y)
                coordinates.append(xy_coordinates)
                means_x.append(
                    np.average(activations[0:n // 2][mask[0:n // 2]],
                               axis=0)[0:partition])
                means_y.append(
                    np.average(activations[n // 2:][mask[n // 2:]],
                               axis=0)[partition:])

        return coordinates, means_x, means_y, counts_x, counts_y

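    # Closed-form least-squares maps between the two activation spaces:
    # A_XY minimizes ||Y A_XY^T - X||^2 and A_YX minimizes ||X A_YX^T - Y||^2,
    # via the normal equations (hence the inverse covariance factors).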
    def get_optimal_maps(X, Y):

        Σ_XX = X.transpose() @ X
        Σ_XY = X.transpose() @ Y
        Σ_YY = Y.transpose() @ Y
        Σ_YX = Σ_XY.transpose()

        A_XY = Σ_XY @ np.linalg.inv(Σ_YY)
        A_YX = Σ_YX @ np.linalg.inv(Σ_XX)

        Xhat = Y @ A_XY.transpose()
        Yhat = X @ A_YX.transpose()

        errx = np.sqrt(np.mean((Y @ A_XY.transpose() - X)**2))
        erry = np.sqrt(np.mean((X @ A_YX.transpose() - Y)**2))

        err_baseline_x = np.sqrt(np.mean((X - np.mean(X, 0))**2))
        err_baseline_y = np.sqrt(np.mean((Y - np.mean(Y, 0))**2))

        print(errx, err_baseline_x)
        print(erry, err_baseline_y)

        return A_XY, A_YX, Xhat, Yhat, (errx, err_baseline_x), (erry,
                                                                err_baseline_y)

    def dim_reduce(Z, method="umap"):

        if method == "svd":
            U, S, V = np.linalg.svd(Z, full_matrices=False)
            layout = U[:, 0:2]
            return layout

        if method == "umap":
            umap_defaults = dict(n_components=2,
                                 n_neighbors=50,
                                 min_dist=0.05,
                                 verbose=True,
                                 metric="cosine")
            layout = UMAP(**umap_defaults).fit_transform(Z)
            return layout

    def get_atlas(model, ops):

        model_x, model_y = get_model(model[0]), get_model(model[1])

        model_x.load_graphdef()
        model_y.load_graphdef()

        X = np.concatenate([
            load_activations(model_x, op, num_activations=50000)[0]
            for op in ops[0]
        ], 1).astype(np.float32)
        Y = np.concatenate([
            load_activations(model_y, op, num_activations=50000)[0]
            for op in ops[1]
        ], 1).astype(np.float32)

        A_XY, A_YX, Xhat, Yhat, errx, erry = get_optimal_maps(X, Y)

        Xc = np.concatenate([X, Yhat], axis=-1)
        Yc = np.concatenate([Xhat, Y], axis=-1)

        Z = np.concatenate([Xc, Yc])

        layout = dim_reduce(Z, method="umap")

        layout_centered = (layout - np.min(layout, 0))
        layout_centered = layout_centered / np.max(layout_centered, 0)

        coordinates, means_x, means_y, counts_x, counts_y = bin_laid_out_activations(
            layout_centered, Z, X.shape[1], 20)

        coordinates = np.array(coordinates)
        counts_x = np.array(counts_x)
        counts_y = np.array(counts_y)
        means_x = np.array(means_x)
        means_y = np.array(means_y)

        return coordinates, means_x, means_y, counts_x, counts_y, errx, erry, A_XY, A_YX, layout_centered

    import json
    import hashlib
    identifier = hashlib.md5(json.dumps(
        (model, ops)).encode('utf-8')).hexdigest()

    def pre_relu(name):
        if "mixed" in name:
            return (f"{name}_1x1:0", f"{name}_3x3:0", f"{name}_5x5:0",
                    f"{name}_pool_reduce:0")
        else:
            return [name + ":0"]

    coordinates, means_x, means_y, counts_x, counts_y, errx, erry, A_XY, A_YX, layout = \
        get_atlas(model, [pre_relu(ops[0]), pre_relu(ops[1])])

    plt.figure(figsize=(10, 10))
    plt.scatter(layout[0:layout.shape[0] // 2, 0],
                layout[0:layout.shape[0] // 2, 1], 1, "b")
    plt.scatter(layout[layout.shape[0] // 2:, 0], layout[layout.shape[0] // 2:,
                                                         1], 1, "r")
    plt.show()
    with write_handle(f"gs://clarity-public/ggoh/Diff/{identifier}/scatter.png"
                      ) as handle:
        plt.savefig(handle)

    manifest = {
        "model_x": model[0],
        "model_y": model[1],
        "ops_x": ops[0],
        "ops_y": ops[1],
        "coordinates": coordinates,
        "counts_x": counts_x,
        "counts_y": counts_y,
        "means_x": means_x,
        "means_y": means_y,
        "err_x": errx,
        "err_y": erry,
        "layout": layout,
        "A_XY": A_XY,
        "A_YX": A_YX,
        "identifier": identifier
    }

    print("Identifier", identifier)
    print(
        save(manifest,
             f"gs://clarity-public/ggoh/Diff/{identifier}/manifest.json"))

    del manifest["means_x"]
    del manifest["means_y"]
    del manifest["A_XY"]
    del manifest["A_YX"]

    manifest["layout"] = np.concatenate([
        layout[0:5000],
        layout[layout.shape[0] // 2:layout.shape[0] // 2 + 5000]
    ]).astype(np.float16)

    print(
        save(manifest,
             f"gs://clarity-public/ggoh/Diff/{identifier}/manifest_slim.json"))
Example #16
def optimize_input(obj,
                   model,
                   param_f,
                   transforms,
                   lr=0.05,
                   step_n=512,
                   num_output_channels=4,
                   do_render=False,
                   out_name="out"):

    sess = create_session()

    # Set up optimization problem
    size = 84
    t_size = tf.placeholder_with_default(size, [])
    T = render.make_vis_T(
        model,
        obj,
        param_f=param_f,
        transforms=transforms,
        optimizer=tf.train.AdamOptimizer(lr),
    )

    tf.global_variables_initializer().run()

    if do_render:
        video_fn = out_name + '.mp4'
        writer = FFMPEG_VideoWriter(video_fn, (size, size * 4), 60.0)

    # Optimization loop
    try:
        for i in range(step_n):
            _, loss, img = sess.run([T("vis_op"), T("loss"), T("input")])

            if do_render:
                #if outputting only one channel...
                if num_output_channels == 1:
                    img = img[..., -1:]  #print(img.shape)
                    img = np.tile(img, 3)
                else:
                    #img=img[...,-3:]
                    img = img.transpose([0, 3, 1, 2])
                    img = img.reshape([84 * 4, 84, 1])
                    img = np.tile(img, 3)
                writer.write_frame(_normalize_array(img))
                if i > 0 and i % 50 == 0:
                    clear_output()
                    print("%d / %d  score: %f" % (i, step_n, loss))
                    show(img)

    except KeyboardInterrupt:
        pass
    finally:
        if do_render:
            print("closing...")
            writer.close()

    # Save trained variables
    if do_render:
        train_vars = sess.graph.get_collection(
            tf.GraphKeys.TRAINABLE_VARIABLES)
        params = np.array(sess.run(train_vars), object)
        save(params, out_name + '.npy')

        # Save final image
        final_img = T("input").eval({t_size: 600})[..., -1:]  #change size
        save(final_img, out_name + '.jpg', quality=90)

    out = T("input").eval({t_size: 84})
    sess.close()
    return out