Example #1
# NOTE: the original snippet is truncated above; the def line and the head of
# the docstring are reconstructed (hypothetical names) so the fragment parses.
def get_weight_contrs(weights, contrs, layernames):
    """
    Return one dict per layer, mapping each name to its weight and
    contribution:
    [
        {
            name: {
                'weight': ...,
                'contr': ...
            }
        }
    ]
    """
    return [{
        name: {
            "weight": weights[name][i],
            "contr": contrs[name][i]
        }
        for name in weights.keys()
    } for i in range(len(layernames))]
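
# Hypothetical usage of the reconstructed helper (values illustrative):
#   per_layer = get_weight_contrs({'u1': [0.2, 0.5]}, {'u1': [0.1, 0.3]},
#                                 ['layer3', 'layer4'])
#   per_layer[0]  # -> {'u1': {'weight': 0.2, 'contr': 0.1}}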


model = loadmodel(hook_feature, hook_modules=hook_modules)
fo = NeuronOperator()

# ==== STEP 1: Feature extraction ====
# features: list of activations - one 63305 x c x h x w tensor for each feature
# layer (defined by settings.FEATURE_NAMES; default is just layer4)
# maxfeature: the maximum activation across the input map for each channel
# (e.g. for layer4 there is a 7x7 input map; what's the max value?). One
# 63305 x c tensor for each feature layer.
features, maxfeature, preds, logits = fo.feature_extraction(model=model)
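
# For intuition, maxfeature is the spatial max of features; with NumPy arrays
# the relationship would be (a sketch, not a pipeline step):
#   maxfeature[0] == features[0].max(axis=(2, 3))  # 63305 x c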

# ==== STEP 2: Threshold quantization ====
thresholds = [
    fo.quantile_threshold(lf, savepath=f"quantile_{ln}.npy")
    for lf, ln in zip(features, layernames)
]
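
# Sketch of the per-channel quantile computed above (assuming a scalar
# quantile level q, e.g. 0.005, of activations kept above threshold; names
# here are illustrative):
#   flat = lf.transpose(0, 2, 3, 1).reshape(-1, lf.shape[1])  # (N*h*w) x c
#   thresholds_sketch = np.quantile(flat, 1 - q, axis=0)      # one per channel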

# ==== Average feature activation quantiling ====
Example #2
    args = parser.parse_args()

    # np.random.seed returns None; keep an explicit RandomState instead so it
    # can be passed around (args must also be parsed before it is used).
    random = np.random.RandomState(args.seed)

    os.makedirs(save_dir, exist_ok=True)
    tutil.save_args(args, save_dir)

    datasets = load_ade20k(
        data_dir, random_state=random, max_classes=5 if args.debug else None
    )
    dataloaders = {
        s: to_dataloader(d, batch_size=args.batch_size, num_workers=args.workers)
        for s, d in datasets.items()
    }

    # Always load pretrained
    model = loadmodel(None, pretrained_override=args.pretrained)
    # Replace the last layer
    n_classes = datasets["train"].n_classes
    if settings.MODEL == "resnet18":
        model.fc = nn.Linear(512, n_classes)
    elif settings.MODEL == "resnet101":
        model.fc = nn.Linear(2048, n_classes)
    elif settings.MODEL == "alexnet":
        model.classifier[-1] = nn.Linear(4096, n_classes)
    elif settings.MODEL == "vgg16":
        model.classifier[-1] = nn.Linear(4096, n_classes)
    else:
        raise NotImplementedError
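
    # Model-agnostic alternative (sketch): infer the input width from the
    # existing head instead of hard-coding 512/2048/4096, e.g. for resnets:
    #   model.fc = nn.Linear(model.fc.in_features, n_classes)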


    # Move the model on/off the GPU
Example #3
import os
import settings
from loader.model_loader import loadmodel
from util.feature_operation import FeatureOperator
from util.clean import clean
from util.feature_decoder import SingleSigmoidFeatureClassifier
from util.image_operation import *
from PIL import Image
import numpy as np
# NOTE: scipy.misc.imresize/imread were removed in SciPy >= 1.3; on a modern
# stack, PIL.Image.resize and imageio.imread are drop-in substitutes.
from scipy.misc import imresize, imread
from visualize.plot import random_color
from torch.autograd import Variable as V
import torch

model = loadmodel()
fo = FeatureOperator()

features, _ = fo.feature_extraction(model=model)

for layer_id, layer in enumerate(settings.FEATURE_NAMES):
    feat_clf = SingleSigmoidFeatureClassifier(feature=features[layer_id],
                                              layer=layer,
                                              fo=fo)
    feat_clf.load_snapshot(14, unbiased=True)

    if not settings.GRAD_CAM:
        fo.weight_decompose(model,
                            feat_clf,
                            feat_labels=[l['name'] for l in fo.data.label])

    with open(settings.DATASET_INDEX_FILE) as f:
Example #4
import settings
from loader.model_loader import loadmodel
from feature_operation import hook_feature, FeatureOperator
from visualize.report import generate_html_summary
from util.clean import clean

fo = FeatureOperator()
model = loadmodel(hook_feature)

############ STEP 1: feature extraction ###############
features, maxfeature = fo.feature_extraction(model=model)

for layer_id, layer in enumerate(settings.FEATURE_NAMES):
    ############ STEP 2: calculating threshold ############
    thresholds = fo.quantile_threshold(features[layer_id],
                                       savepath="quantile.npy")

    ############ STEP 3: calculating IoU scores ###########
    tally_result = fo.tally(features[layer_id],
                            thresholds,
                            savepath="tally.csv")

    ############ STEP 4: generating results ###############
    generate_html_summary(fo.data,
                          layer,
                          tally_result=tally_result,
                          maxfeature=maxfeature[layer_id],
                          features=features[layer_id],
                          thresholds=thresholds)
    if settings.CLEAN:
        clean()

Example #5
import settings
from loader.model_loader import loadmodel
from feature_operation import hook_feature, hook_inputconv_feature, FeatureOperator
from visualize.report import generate_html_summary
from util.clean import clean

fo = FeatureOperator()

if settings.INPUT_CONV:
    model = loadmodel(hook_inputconv_feature)
else:
    model = loadmodel(hook_feature)
    
############ STEP 1: feature extraction ###############
features, maxfeature = fo.feature_extraction(model=model)

for layer_id, layer in enumerate(settings.FEATURE_NAMES):
    ############ STEP 2: calculating threshold ############
    thresholds = fo.quantile_threshold(features[layer_id],
                                       savepath="quantile.npy")

    ############ STEP 3: calculating IoU scores ###########
    tally_result = fo.tally(features[layer_id],
                            thresholds,
                            savepath="tally.csv")

    ############ STEP 4: generating results ###############
    generate_html_summary(fo.data, layer,
                          tally_result=tally_result,
                          maxfeature=maxfeature[layer_id],
                          features=features[layer_id],
                          thresholds=thresholds)
    if settings.CLEAN:
        clean()
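
# A minimal sketch of the IoU score that fo.tally computes per (unit, concept)
# pair, assuming boolean NumPy masks; the names here are illustrative, not the
# library's API.
import numpy as np

def iou(unit_mask, concept_mask):
    """Intersection-over-union of two equal-shape boolean masks."""
    intersection = np.logical_and(unit_mask, concept_mask).sum()
    union = np.logical_or(unit_mask, concept_mask).sum()
    return float(intersection) / union if union > 0 else 0.0

# e.g. unit_mask could come from thresholding one unit's activation map:
#   unit_mask = features[layer_id][img_idx, unit] > thresholds[unit]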
Example #6
import os

import pandas as pd
from torchvision import transforms

from loader.data_loader import ade20k
from loader.data_loader.broden import normalize_image
from loader.model_loader import loadmodel
from adversarial_examples.make_size_position import EXAMPLES, SIZE_GRID, XY_GRID

if __name__ == "__main__":
    from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter

    parser = ArgumentParser(description=__doc__,
                            formatter_class=ArgumentDefaultsHelpFormatter)

    args = parser.parse_args()

    model = loadmodel(None)

    transform = transforms.Compose(
        [transforms.Resize((224, 224)),
         transforms.ToTensor()])
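
    # Sketch: how this transform would be applied to one example image
    # (PIL.Image assumed; the loading code is not shown in this snippet):
    #   img = Image.open(orig_fname).convert('RGB')
    #   x = transform(img).unsqueeze(0)  # 1 x 3 x 224 x 224 tensor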

    records = []
    for orig_fname, _, orig_class, new_class, mask_class in EXAMPLES:
        orig_basename = os.path.splitext(orig_fname)[0]
        for x_pos in XY_GRID:
            for y_pos in XY_GRID:
                for size in SIZE_GRID:
                    new_fname = f'{orig_basename}_{x_pos}_{y_pos}_{size}.jpg'
                    full_fname = os.path.join('adversarial_examples',
                                              'size_position', new_fname)