Example #1
def save_lucid_model(config, params, *, model_path, metadata_path):
    config = config.copy()
    config.pop("num_envs")
    library = config.get("library", "baselines")
    venv = create_env(1, **config)
    arch = get_arch(**config)

    with tf.Graph().as_default(), tf.Session() as sess:
        observation_space = venv.observation_space
        observations_placeholder = tf.placeholder(shape=(None, ) +
                                                  observation_space.shape,
                                                  dtype=tf.float32)

        if library == "baselines":
            from baselines.common.policies import build_policy

            with tf.variable_scope("ppo2_model", reuse=tf.AUTO_REUSE):
                policy_fn = build_policy(venv, arch)
                policy = policy_fn(
                    nbatch=None,
                    nsteps=1,
                    sess=sess,
                    observ_placeholder=(observations_placeholder * 255),
                )
                pd = policy.pd
                vf = policy.vf

        else:
            raise ValueError(f"Unsupported library: {library}")

        load_params(params, sess=sess)

        Model.save(
            model_path,
            input_name=observations_placeholder.op.name,
            output_names=[pd.logits.op.name, vf.op.name],
            image_shape=observation_space.shape,
            image_value_range=[0.0, 1.0],
        )

    metadata = {
        "policy_logits_name": pd.logits.op.name,
        "value_function_name": vf.op.name,
        "env_name": config.get("env_name"),
        "gae_gamma": config.get("gamma"),
        "gae_lambda": config.get("lambda"),
    }
    # Unwrap nested env wrappers until one exposes the button combos.
    env = venv
    while hasattr(env, "env") and (not hasattr(env, "combos")):
        env = env.env
    metadata["action_combos"] = getattr(env, "combos", None)

    save_joblib(metadata, metadata_path)
    return {
        "model_bytes": read(model_path, cache=False, mode="rb"),
        **metadata
    }
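
The artifacts written above can be read back without re-running the policy. A minimal sketch using helpers shown elsewhere in these examples (load_graphdef from Example #3, load_joblib as used in Example #9):

graph_def = load_graphdef(model_path)               # the frozen policy graph
metadata = load_joblib(metadata_path, cache=False)  # names recorded above
logits_name = metadata["policy_logits_name"]
value_name = metadata["value_function_name"]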
Example #2
def SvelteComponent(name, path):
    """Display svelte components in iPython.
  
  Args:
    name: name of svelte component (must match component filename when built)
    path: path to compile svelte .js file or source svelte .html file.
      (If html file, we try to call svelte and build the file.)
  
  Returns:
    A function mapping data to a rendered svelte component in ipython.
  """
    if path[-3:] == ".js":
        js_path = path
    elif path[-5:] == ".html":
        print("Trying to build svelte component from html...")
        js_path = build_svelte(path)
    js_content = read(js_path)

    def inner(data):
        # random.randint needs integer bounds, so use 10**8 rather than 1e8
        id_str = name + "_" + hex(random.randint(0, 10**8))[2:]
        html = _template \
            .replace("$js", js_content) \
            .replace("$name", name) \
            .replace("$data", json.dumps(data)) \
            .replace("$id", id_str)
        _display_html(html)

    return inner
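
A hypothetical usage sketch; the component name, file path, and data payload below are illustrative assumptions, not part of the original:

# Assumes a compiled Svelte component "Hello" at ./svelte/Hello.js.
Hello = SvelteComponent("Hello", "./svelte/Hello.js")
Hello({"message": "world"})  # renders the component inline in the notebook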
Example #3
def load_graphdef(model_url, reset_device=True):
    """Load GraphDef from a binary proto file."""
    graphdef_string = read(model_url)
    graph_def = tf.GraphDef.FromString(graphdef_string)
    if reset_device:
        for n in graph_def.node:
            n.device = ""
    return graph_def
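
A minimal usage sketch (the model URL is a placeholder). Because reset_device cleared the device annotations, the imported graph is not pinned to the machine it was exported on:

graph_def = load_graphdef("https://example.com/model.pb")
with tf.Graph().as_default():
    tf.import_graph_def(graph_def, name="")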
Example #4
def test_read_remote_url(mocker):
    path = "https://example.com/example.html"
    golden = b"42"
    mock_urlopen = mocker.patch('urllib.request.urlopen',
                                return_value=io.BytesIO(golden))

    content = read(path, cache=False)

    mock_urlopen.assert_called_once_with(path)
    assert content == golden
Example #5
def load_graphdef(model_url, reset_device=True):
  """Load GraphDef from a binary proto file."""
  graphdef_string = read(model_url)
  try:
    graph_def = tf.GraphDef.FromString(graphdef_string)
  except DecodeError:  # google.protobuf.message.DecodeError
    cache_path = local_cache_path(model_url)
    log.error("Could not decode graphdef protobuf. Maybe check if you have a corrupted file cached at {}?".format(cache_path))
    raise RuntimeError('Could not load_graphdef!')

  if reset_device:
    for n in graph_def.node:
      n.device = ""

  return graph_def
Example #6
def test_read_binary_file():
    path = "./tests/fixtures/bytes"
    content = read(path)
    with io.open(path, 'rb') as f:
        golden_content = f.read()
    assert content == golden_content
Example #7
def test_read_txt_file():
    # `path` and `string` are presumably module-level fixtures in the original
    # test file (a text fixture path and its expected contents).
    content = read(path, encoding='utf-8')
    assert content == string
Example #8
from lucid.misc.io import show
import lucid.misc.io.showing as showing
from lucid.misc.channel_reducer import ChannelReducer
import lucid.optvis.param as param
import lucid.optvis.objectives as objectives
import lucid.optvis.render as render
from lucid.misc.io import show, load
from lucid.misc.io.reading import read
from lucid.misc.io.serialize_array import serialize_array
from lucid.misc.io.showing import _image_url
from lucid.misc.gradient_override import gradient_override_map
import os

model = models.InceptionV1()
model.load_graphdef()
labels_str = read("map_clsloc.txt", encoding='utf8')
labels = [line[line.find(" "):].strip() for line in labels_str.split("\n")]
labels = [label[label.find(" "):].strip().replace("_", " ") for label in labels]
labels = ["dummy"] + labels


def raw_class_group_attr(img, layer, label, group_vecs, override=None):
    """How much did spatial positions at a given layer affect an output class?"""

    # Set up a graph for doing attribution...
    with tf.Graph().as_default(), tf.Session(), gradient_override_map(override or {}):
        t_input = tf.placeholder_with_default(img, [None, None, 3])
        T = render.import_model(model, t_input, t_input)

        # Compute activations
        acts = T(layer).eval()
Example #9
def load(
    checkpoint_path,
    *,
    resample=True,
    model_path=None,
    metadata_path=None,
    trajectories_path=None,
    observations_path=None,
    trajectories_kwargs={},
    observations_kwargs={},
    full_resolution=False,
    temp_files=False,
    coinrun_aisc=False,
):
    """
    if coinrun_aisc is specified, then deploy on modified env where coin spawns at random
    spot, even if the saved model config specifies a different environment.
    """
    if temp_files:
        default_path = lambda suffix: tempfile.mkstemp(suffix=suffix)[1]
    else:
        path_stem = re.split(r"(?<=[^/])\.[^/\.]*$", checkpoint_path)[0]
        path_stem = os.path.join(os.path.dirname(path_stem), "rl-clarity",
                                 os.path.basename(path_stem))
        default_path = lambda suffix: path_stem + suffix
    if model_path is None:
        model_path = default_path(".model.pb")
    if metadata_path is None:
        metadata_path = default_path(".metadata.jd")
    if trajectories_path is None:
        trajectories_path = default_path(".trajectories.jd")
    if observations_path is None:
        observations_path = default_path(".observations.jd")

    if resample:
        trajectories_kwargs.setdefault("num_envs", 8)
        trajectories_kwargs.setdefault("num_steps", 512)
        observations_kwargs.setdefault("num_envs", 32)
        observations_kwargs.setdefault("num_obs", 128)
        observations_kwargs.setdefault("obs_every", 128)

        checkpoint_dict = load_joblib(checkpoint_path, cache=False)
        config = checkpoint_dict["args"]
        if coinrun_aisc:
            config['env_name'] = 'coinrun_aisc'
        print()
        print("///////////////////////////////////////////")
        print("Environment specified:", config['env_name'])
        print("///////////////////////////////////////////")
        print()
        if full_resolution:
            config["render_human"] = True
        if config.get("use_lstm", 0):
            raise ValueError(
                "Recurrent networks not yet supported by this interface.")
        params = checkpoint_dict["params"]
        config["coinrun_old_extra_actions"] = 0
        if config.get("env_name") == "coinrun_old":
            # we may need to add extra actions depending on the size of the policy head
            policy_bias_keys = [
                k for k in checkpoint_dict["params"] if k.endswith("pi/b:0")
            ]
            if policy_bias_keys:
                [policy_bias_key] = policy_bias_keys
                (num_actions,
                 ) = checkpoint_dict["params"][policy_bias_key].shape
                if num_actions == 9:
                    config["coinrun_old_extra_actions"] = 2

        return {
            **save_lucid_model(config,
                               params,
                               model_path=model_path,
                               metadata_path=metadata_path),
            **save_observations(
                config,
                params,
                observations_path=observations_path,
                num_envs=observations_kwargs["num_envs"],
                num_obs=observations_kwargs["num_obs"],
                obs_every=observations_kwargs["obs_every"],
                full_resolution=full_resolution,
            ),
            **save_trajectories(
                config,
                params,
                trajectories_path=trajectories_path,
                num_envs=trajectories_kwargs["num_envs"],
                num_steps=trajectories_kwargs["num_steps"],
                full_resolution=full_resolution,
            ),
        }

    else:
        observations = load_joblib(observations_path, cache=False)
        if not isinstance(observations, dict):
            observations = {"observations": observations}
        return {
            "model_bytes":
            read(model_path, cache=False, mode="rb"),
            **observations,
            "trajectories":
            load_joblib(trajectories_path, cache=False),
            **load_joblib(metadata_path, cache=False),
        }
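
The default-path logic above derives artifact paths next to the checkpoint, under an "rl-clarity" subdirectory. A worked sketch (the checkpoint path is a made-up illustration):

checkpoint_path = "runs/agent/checkpoint.jd"
path_stem = re.split(r"(?<=[^/])\.[^/\.]*$", checkpoint_path)[0]
# -> "runs/agent/checkpoint"
path_stem = os.path.join(os.path.dirname(path_stem), "rl-clarity",
                         os.path.basename(path_stem))
# -> "runs/agent/rl-clarity/checkpoint"
model_path = path_stem + ".model.pb"
# -> "runs/agent/rl-clarity/checkpoint.model.pb"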
Example #10
def main():
    # Import a model from the lucid modelzoo,
    # or transform a TensorFlow Slim model from https://github.com/tensorflow/models/tree/master/research/slim;
    # the lucid library helps you download models automatically.
    model = models.InceptionV1()
    model.load_graphdef()

    # labels_str = read("https://gist.githubusercontent.com/aaronpolhamus/964a4411c0906315deb9f4a3723aac57/raw/aa66dd9dbf6b56649fa3fab83659b2acbf3cbfd1/map_clsloc.txt")
    # labels_str = labels_str.decode("utf-8")
    # labels = [line[line.find(" "):].strip() for line in labels_str.split("\n")]
    # labels = [label[label.find(" "):].strip().replace("_", " ") for label in labels]
    # labels = ["dummy"] + labels
    labels_str = read(model.labels_path)
    labels_str = labels_str.decode("utf-8")
    labels = labels_str.split("\n")

    # layers = ["InceptionV1/Mixed_5c/concat", "InceptionV1/Mixed_5b/concat",
    #                 "InceptionV1/Mixed_4f/concat", "InceptionV1/Mixed_4e/concat",
    #                 "InceptionV1/Mixed_4d/concat", "InceptionV1/Mixed_4c/concat",
    #                 "InceptionV1/Mixed_4b/concat", "InceptionV1/Mixed_3b/concat",
    #                 "InceptionV1/Conv2d_2b_1x1/Relu", "InceptionV1/MaxPool_3a_3x3/MaxPool",
    #                 ]
    # layers = ["mixed5b", "mixed5a", "mixed4e", "mixed4d", "mixed4c",
    #           "mixed4b", "mixed4a", "mixed3b", "mixed3a", "maxpool1"]

    # factorization_methods = ['DictionaryLearning', 'FactorAnalysis', 'FastICA', 'IncrementalPCA',
    #                          'LatentDirichletAllocation', 'MiniBatchDictionaryLearning',
    #                          'MiniBatchSparsePCA', 'NMF', 'PCA', 'SparsePCA',
    #                          'TruncatedSVD']
    # factorization_methods = ['KernelPCA', 'SparseCoder', 'dict_learning', 'dict_learning_online', 'fastica']

    layers = ["mixed4d"]
    factorization_methods = ['FactorAnalysis']

    # attr_classes = ['Egyptian cat', 'golden retriever']
    # attr_classes = ['laptop', 'quilt']  # ['Labrador retriever', 'tennis ball', 'tiger cat']
    # attr_classes = ['tiger cat', 'Labrador retriever']
    # ('Labrador retriever', 'golden retriever')
    # [11.319051   9.532383]
    # ('Labrador retriever', 'golden retriever')
    # [8.349452  8.214619 ]
    attr_classes = ['golden retriever']

    global_random_seed = 5
    image_size = 224  # 224

    # whether to load the pre-computed feature attribution
    flag_read_attr = True
    # Shapley value computation method, "Shap" or "IGSG"
    flag1 = "IGSG"
    # number of iterations for computing Shapley values
    iter_num = 100
    # pos_flag=1: only compute positive Shapley values
    # pos_flag=2: consider both positive and negative Shapley values
    pos_flag = 2
    img_name = "./data/dog_cat224.jpg"

    # ---------------------------------------------------------------------------------------------------
    neuron_groups(img_name,
                  layers,
                  model,
                  attr_classes=attr_classes,
                  factorization_methods=factorization_methods,
                  flag1=flag1,
                  flag_read_attr=flag_read_attr,
                  iter_num=iter_num,
                  SG_path=False,
                  labels=labels,
                  pos_flag=pos_flag,
                  thres_explained_var=0.7,
                  vis_random_seed=global_random_seed,
                  image_size=image_size)
Example #11
def load_text_labels(labels_path):
    return read(labels_path, encoding='utf-8').splitlines()
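
A hypothetical call; the labels file path is illustrative:

labels = load_text_labels("./data/labels.txt")
print(labels[:3])  # first few label strings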
Example #12
    #   "spritemap2": image_url_grid(attrs.transpose(2,3,0,1)),
    #   "size1": attrs.shape[3],
    #   "layer1": layer1,
    #   "size2": attrs.shape[0],
    #   "layer2": layer2,
    #   "img" : _image_url(img),
    #   "hint1": _image_url(hint1),
    #   "hint2": _image_url(hint2)
    # })


model = models.InceptionV1()
model.load_graphdef()

labels_str = read(
    "https://gist.githubusercontent.com/aaronpolhamus/964a4411c0906315deb9f4a3723aac57/raw/aa66dd9dbf6b56649fa3fab83659b2acbf3cbfd1/map_clsloc.txt"
)
labels = [line[line.find(" "):].strip() for line in labels_str.split("\n")]
labels = [
    label[label.find(" "):].strip().replace("_", " ") for label in labels
]
labels = ["dummy"] + labels
"""# Simple Attribution Example"""

img = load(
    "https://storage.googleapis.com/lucid-static/building-blocks/examples/dog_cat.png"
)

spatialAttr = spatialAttribution()
spatial_spatial_attr(img,
                     "mixed4d",
Example #13
def main():
    # Import a model from the lucid modelzoo,
    # or transform a TensorFlow Slim model from https://github.com/tensorflow/models/tree/master/research/slim;
    # the lucid library helps you download models automatically.
    model = models.ResnetV1_50_slim()
    model.load_graphdef()

    # labels_str = read("https://gist.githubusercontent.com/aaronpolhamus/964a4411c0906315deb9f4a3723aac57/raw/aa66dd9dbf6b56649fa3fab83659b2acbf3cbfd1/map_clsloc.txt")
    # labels_str = labels_str.decode("utf-8")
    # labels = [line[line.find(" "):].strip() for line in labels_str.split("\n")]
    # labels = [label[label.find(" "):].strip().replace("_", " ") for label in labels]
    # labels = ["dummy"] + labels
    labels_str = read(model.labels_path)
    labels_str = labels_str.decode("utf-8")
    labels = labels_str.split("\n")

    # factorization_methods = ['DictionaryLearning', 'FactorAnalysis', 'FastICA', 'IncrementalPCA',
    #                          'LatentDirichletAllocation', 'MiniBatchDictionaryLearning',
    #                          'MiniBatchSparsePCA', 'NMF', 'PCA', 'SparsePCA',
    #                          'TruncatedSVD']
    # factorization_methods = ['NMF', 'LatentDirichletAllocation']
    # factorization_methods = ['KernelPCA', 'SparseCoder', 'dict_learning', 'dict_learning_online', 'fastica']
    '''
  input_name = 'input'
  # In ResNetV1, each add (joining the residual branch) is followed by a Relu
  # this seems to be the natural "layer" position
  ResnetV1_50_slim.layers = _layers_from_list_of_dicts(ResnetV1_50_slim, [
    {'tags': ['conv'], 'name': 'resnet_v1_50/conv1/Relu', 'depth': 64},
    {'tags': ['conv'], 'name': 'resnet_v1_50/block1/unit_1/bottleneck_v1/Relu', 'depth': 256},
    {'tags': ['conv'], 'name': 'resnet_v1_50/block1/unit_2/bottleneck_v1/Relu', 'depth': 256},
    {'tags': ['conv'], 'name': 'resnet_v1_50/block1/unit_3/bottleneck_v1/Relu', 'depth': 256},
    {'tags': ['conv'], 'name': 'resnet_v1_50/block2/unit_1/bottleneck_v1/Relu', 'depth': 512},
    {'tags': ['conv'], 'name': 'resnet_v1_50/block2/unit_2/bottleneck_v1/Relu', 'depth': 512},
    {'tags': ['conv'], 'name': 'resnet_v1_50/block2/unit_3/bottleneck_v1/Relu', 'depth': 512},
    {'tags': ['conv'], 'name': 'resnet_v1_50/block2/unit_4/bottleneck_v1/Relu', 'depth': 512},
    {'tags': ['conv'], 'name': 'resnet_v1_50/block3/unit_1/bottleneck_v1/Relu', 'depth': 1024},
    {'tags': ['conv'], 'name': 'resnet_v1_50/block3/unit_2/bottleneck_v1/Relu', 'depth': 1024},
    {'tags': ['conv'], 'name': 'resnet_v1_50/block3/unit_3/bottleneck_v1/Relu', 'depth': 1024},
    {'tags': ['conv'], 'name': 'resnet_v1_50/block3/unit_4/bottleneck_v1/Relu', 'depth': 1024},
    {'tags': ['conv'], 'name': 'resnet_v1_50/block3/unit_5/bottleneck_v1/Relu', 'depth': 1024},
    {'tags': ['conv'], 'name': 'resnet_v1_50/block3/unit_6/bottleneck_v1/Relu', 'depth': 1024},
    {'tags': ['conv'], 'name': 'resnet_v1_50/block4/unit_1/bottleneck_v1/Relu', 'depth': 2048},
    {'tags': ['conv'], 'name': 'resnet_v1_50/block4/unit_2/bottleneck_v1/Relu', 'depth': 2048},
    {'tags': ['conv'], 'name': 'resnet_v1_50/block4/unit_3/bottleneck_v1/Relu', 'depth': 2048},
    {'tags': ['dense'], 'name': 'resnet_v1_50/predictions/Softmax', 'depth': 1000},
  ])
  '''
    layers = [
        'resnet_v1_50/block4/unit_1/bottleneck_v1/Relu',
        'resnet_v1_50/block3/unit_1/bottleneck_v1/Relu'
    ]
    factorization_methods = ['FactorAnalysis']

    # attr_classes = ['Egyptian cat', 'golden retriever']
    # attr_classes = ['laptop', 'quilt']  # ['Labrador retriever', 'tennis ball', 'tiger cat']
    # attr_classes = ['tiger cat', 'Labrador retriever']
    # ('Labrador retriever', 'golden retriever')
    # [11.319051   9.532383]
    # ('Labrador retriever', 'golden retriever')
    # [8.349452  8.214619 ]
    attr_classes = ['Egyptian cat', 'Labrador retriever']  # 'Egyptian cat',

    global_random_seed = 5
    image_size = 224  # 224

    # whether to load the pre-computed feature attribution
    flag_read_attr = False
    # Shapley value computation method, "Shap" or "IGSG"
    flag1 = "IGSG"
    # number of iterations for computing Shapley values
    iter_num = 100
    # pos_flag=1: only compute positive Shapley values
    # pos_flag=2: consider both positive and negative Shapley values
    pos_flag = 1
    # img_name = "./data/adv_samples/golden retriever_adv_dog_cat224_2.jpg"
    img_name = "./data/dog_cat224.jpg"
    # ---------------------------------------------------------------------------------------------------
    neuron_groups(img_name,
                  layers,
                  model,
                  attr_classes=attr_classes,
                  factorization_methods=factorization_methods,
                  flag1=flag1,
                  flag_read_attr=flag_read_attr,
                  iter_num=iter_num,
                  SG_path=False,
                  labels=labels,
                  pos_flag=pos_flag,
                  thres_explained_var=0.7,
                  vis_random_seed=global_random_seed,
                  image_size=image_size)
Example #14
def graph_of(f):
    the_string = read(f'./data/vars/{f}.pb')
    graph_def = tf.GraphDef.FromString(the_string)

    return graph_def
Example #15
def unpack_graph_def(f):
    the_string = read(f'./data/vars/{f}.pb')
    graph_def = tf.GraphDef.FromString(the_string)
    # import_graph_def registers the graph in the current default graph; the
    # returned 'out:0' tensor is unused here and the raw GraphDef is returned.
    output = tf.import_graph_def(graph_def, return_elements=['out:0'])
    return graph_def