Exemplo n.º 1
0
# Load the (cached, input-normalized) train/test splits for the requested dataset.
ds_train, ds_test = data_utils.get_cached_dataset_with_normalized_inputs(
    args.ds, trainval_test=args.trainval)

# Remember the original training set: the model below is constructed against it
# even if the evaluation datasets are swapped out afterwards.
model_ds = ds_train

# Optionally evaluate on a dataset different from the one the model was trained on.
if args.test_dataset != "":
    ds_train, ds_test = data_utils.get_cached_dataset_with_normalized_inputs(
        args.test_dataset, trainval_test=args.trainval)

# Model

print("Initializing model and loading state...")
# Build the network against the *original* training set (model_ds) so the
# architecture matches the saved checkpoint even when args.test_dataset
# replaced ds_train above.
model = model_utils.get_model(
    net_name=args.net,
    ds_train=model_ds,
    depth=args.depth,
    width=args.width,  # width factor for WRN, base_width for others
    epoch_count=1,  # presumably irrelevant for inference-only use — TODO confirm
    dropout=args.dropout or args.mcdropout)  # MC-dropout also requires dropout layers at build time
model.load_state(args.saved_path)  # restore trained weights from the checkpoint

if args.view or args.hard or args.view2:
    ds_disp = ds_train if args.test_on_training_set else ds_test

    if args.hard:
        ds_disp = training.get_hard_examples(model, ds_disp)

    def predict(x):
        out_names = ['output', 'probs', 'probs_entropy']
        output, probs, probs_entropy = model.predict(x,
                                                     single_input=True,
Exemplo n.º 2
0
        'tinyimagenet-c': r'TinyImageNet-C',
        'tinyimagenet-r': r'TinyImageNet-R',
        'lsun-c': r'LSUN-C',
        'lsun-r': r'LSUN-R',
        'isun-r': r'iSUN',
        'gaussian': r'Gaussiov šum',
        'uniform': r'Uniformni šum',
    }[name]


# Model

print("Initializing model and loading state...")
# Rebuild the architecture for the selected dataset, then restore the
# trained weights from the given checkpoint path.
model = model_utils.get_model(
    net_name=args.net,
    ds_train=ds_id_to_ds[args.ds],
    depth=args.depth,
    width=args.width,
    epoch_count=1,
    dropout=args.dropout or args.mcdropout)
model.load_state(args.saved_path)

# ODIN temperature scaling; --notemp disables it.
# NOTE(review): the disabled value is 0, not 1 — confirm Odin treats temp=0 as
# "no scaling" rather than dividing logits by zero.
temp = 1000 if not args.notemp else 0
odin = Odin(model, temp=temp)

# Misclassification

if args.misclassified:
    ds = ds_id_to_ds[args.ds]
    corr = [model.predict(x, single_input=True) == y for x, y in tqdm(ds)]
    correct = []
    misclassified = []
    for i, c in enumerate(corr):
Exemplo n.º 3
0
# Map human-readable dataset ids to the dataset objects prepared earlier in the
# file (ds, ds_ud, ds_rand, ds_camvid are defined above this chunk).
dsid_to_ds = {
    'CIFAR-10': ds,
    'CIFAR-10-UD': ds_ud,
    'random': ds_rand,
    'CamVid': ds_camvid
}

print("Loading model")

# Alternative configuration (WRN-28-10 on CIFAR), kept for quick switching:
#ds_id, net_name, depth, width = 'cifar', 'wrn', 28, 10
#saved_path = dirs.SAVED_NETS + '/cifar-trainval/wrn-28-10/2018-04-28-1926/Model'

# Hard-coded checkpoint: ResNet-18, base width 64, trained on "mozgalo".
ds_id, net_name, depth, width = 'mozgalo', 'rn', 18, 64
saved_path = dirs.SAVED_NETS + '/mozgalo-trainval/rn-18-64-e10/2018-05-13-1747/Model'

# NOTE(review): the model is built against `ds` (CIFAR-10 per the dict above)
# although the checkpoint id is 'mozgalo' — confirm the class count and input
# shape match, otherwise load_state will fail or silently mismatch.
model = model_utils.get_model(
    net_name=net_name, ds_train=ds, depth=depth, width=width)

model.load_state(saved_path)

# Inspect the bias vector of the final ('conv_logits') layer of the restored
# model, printing the raw values plus their sum and mean.
print("Printing logit biases")
with model._graph.as_default():
    # NOTE(review): `vars` shadows the builtin; the name is kept unchanged
    # because code later in the file may reference it.
    vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
    # Pick the single trainable variable holding the logit-layer biases
    # (IndexError if the graph has no such variable, same as the original).
    logit_biases_var = [v for v in vars if v.name == 'conv_logits/bias:0'][0]
    logit_biases = model._sess.run(logit_biases_var)
    print("Logit biases:", logit_biases)
    print("Logit biases sum:", logit_biases.sum())
    print("Logit biases mean:", logit_biases.mean())

print("Collecting logit statistics")