Example #1
import os
import pickle

from neon.util.modeldesc import ModelDescription  # neon 1.x path (assumed)


def compare_files(file1, file2):
    """
    Helper function to compare two serialized model files.

    Only the model weights, states, and layer config parameters are
    compared.

    Returns:
        bool: True if the two files match
    """
    models = []
    for fn in [file1, file2]:
        assert os.path.exists(fn), 'Could not find file %s' % fn

        with open(fn, 'rb') as fid:
            models.append(ModelDescription(pickle.load(fid)))

    return models[0] == models[1]
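
Hypothetical usage of the helper above (the file names are placeholders):

if compare_files('checkpoint_a.pkl', 'checkpoint_b.pkl'):
    print('the two serialized models match')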
Example #2
import os
import pickle

import numpy as np

from neon.backends import gen_backend
from neon.util.modeldesc import ModelDescription  # neon 1.x path (assumed)

# pdict holds the converted parameters, built earlier in the original script
conv_weights_fn = os.path.join(model_path, 'caffenet_conv.pkl')
with open(conv_weights_fn, 'wb') as fid:
    pickle.dump(pdict, fid)

pdict = {}   # release the parameters now that they are on disk
del pdict

# deserialize and run the model in neon
# generate a backend
be = gen_backend(backend='gpu', rng_seed=1, batch_size=256)

with open(conv_weights_fn, 'rb') as fid:
    pdict_l = pickle.load(fid)

# for testing we need to switch dropout to keep=1.0
md = ModelDescription(pdict_l)

# get the dropout values set in the serialized file and reset them to
# keep=1.0 (no dropout) so the output can be compared against Caffe
drop_layers = {ky: -1 for ky in ["drop6", "drop7"]}
for l in drop_layers:
    drop_layer = md.getlayer(l)
    drop_layers[l] = drop_layer['config']['keep']
    drop_layer['config']['keep'] = 1.0
md = dict(md)

# generate a fake input
IM_SIZE = (be.bsz, 3, 224, 224)
np.random.seed(1)
im = np.random.randint(-150, 150, IM_SIZE)
fake_labels = np.zeros((IM_SIZE[0], 10))
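
The dropout reset above is plain dict surgery on the serialized description;
a toy illustration, with an ordinary dict standing in for the layer entry
(its shape is inferred from this snippet, not checked against neon):

layer = {'type': 'neon.layers.layer.Dropout',
         'config': {'name': 'drop6', 'keep': 0.5}}
saved_keep = layer['config']['keep']   # stash the trained probability
layer['config']['keep'] = 1.0          # keep=1.0 makes dropout a pass-through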
Example #3
# conv_file_path and pdict come from the conversion step earlier in the
# original script
with open(conv_file_path, 'wb') as fid:
    pickle.dump(pdict, fid)

pdict = {}
del pdict

# deserialize and run the model in neon
# generate a backend
be = gen_backend(backend='gpu', rng_seed=1, batch_size=64)

with open(conv_file_path, 'rb') as fid:
    pdict_l = pickle.load(fid)

# for testing we need to switch dropout to keep=1.0
# get the dropout values set in the serialized file and reset them to
# keep=1.0 (no dropout) so the output can be compared against Caffe
md = ModelDescription(pdict_l)

drop_layers = {ky: -1 for ky in ["loss1/drop_fc", "loss2/drop_fc", "pool5/drop_7x7_s1"]}
for l in drop_layers:
    drop_layer = md.getlayer(l)
    drop_layers[l] = drop_layer['config']['keep']
    drop_layer['config']['keep'] = 1.0
md = dict(md)

# generate a fake input
IM_SIZE = (be.bsz, 3, 224, 224)
np.random.seed(1)
im = np.random.randint(-150, 150, IM_SIZE)
fake_labels = np.zeros((IM_SIZE[0], 10))

# need this iterator to initialize the model
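
The comment above marks the next step in the original script: the fake batch
is wrapped in an iterator so the model can be initialized. A minimal sketch,
assuming a neon release that provides neon.data.ArrayIterator (class name and
arguments are assumptions, not taken from this snippet):

from neon.data import ArrayIterator

fake_iter = ArrayIterator(im.reshape(be.bsz, -1), fake_labels,
                          nclass=10, lshape=IM_SIZE[1:], make_onehot=False)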
Example #4
common = dict(target_size=h*w, target_conversion='read_contents',
              onehot=False, target_dtype=np.uint8, nclasses=c)
data_dir = args.image_path

test_set = PixelWiseImageLoader(set_name='test',
                                repo_dir=data_dir,
                                media_params=test_params,
                                index_file=os.path.join(
                                    data_dir, 'test_images.csv'),
                                **common)

# initialize model object
segnet_model = Model(layers=gen_model(c, h, w))
segnet_model.initialize(test_set)

# load up the serialized model
model_desc = ModelDescription(load_obj(args.save_model_file))
for layer in segnet_model.layers_to_optimize:
    name = layer.name
    trained_layer = model_desc.getlayer(name)
    layer.load_weights(trained_layer)

fig = plt.figure()
if args.display:
    plt.ion()

im1 = None
im2 = None

cnt = 1
for x, t in test_set:
    z = segnet_model.fprop(x).get()
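    # (sketch) turn the raw output buffer into per-pixel class ids; neon
    # buffers are laid out (features, batch), and the feature axis is
    # assumed here to unpack as (c, h, w) for this segnet model
    pred = z.reshape((c, h, w, -1))        # (classes, height, width, batch)
    class_map = np.argmax(pred, axis=0)    # class id per pixel, per image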
Example #5
def compare_model_pickles(fn1, fn2):
    print('comparing pickle files %s and %s' % (fn1, fn2))
    model1 = ModelDescription(fn1)
    model2 = ModelDescription(fn2)
    return ModelDescription.match(model1, model2)
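
Hypothetical usage; the file names below are placeholders:

if compare_model_pickles('model_before.pkl', 'model_after_reload.pkl'):
    print('serialized models match')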
Example #6
# caffe_synset and neon_synsets are defined earlier in the original script
clabels = list(np.loadtxt(caffe_synset, str, delimiter='\t'))
# needed when using synset_words
# clabels = [' '.join(x.split(' ')[1:]) for x in clabels]

lbl_map = []
for lbl in neon_synsets[0:1000]:
    ind = clabels.index(lbl)
    lbl_map.append(ind)
lbl_map = np.array(lbl_map)

with open('neon_caffe_label_map.pkl', 'wb') as fid:
    pickle.dump(lbl_map, fid)
print('Wrote mapping to neon_caffe_label_map.pkl')

print('loading model file %s' % args.model_file)
model = ModelDescription(args.model_file)

def find_output_layer(check_lay):
    layers = []
    for ind in range(len(check_lay)-1, -1, -1):
        if check_lay[ind]['type'].find('Linear') > -1:
            layers.append(check_lay[ind]['config']['name'])
            break
        if check_lay[ind]['type'].find('Bias') > -1:
            layers.append(check_lay[ind]['config']['name'])
            if ind > 0 and check_lay[ind-1]['type'].find('Linear') > -1:
                layers.append(check_lay[ind-1]['config']['name'])
            break
    print('found following layers: ')
    print(layers)
    return layers
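
A toy call with a minimal layer list shaped like the entries the function
inspects (names and type strings are illustrative only):

toy_layers = [
    {'type': 'neon.layers.layer.Linear', 'config': {'name': 'fc'}},
    {'type': 'neon.layers.layer.Bias', 'config': {'name': 'fc_bias'}},
]
print(find_output_layer(toy_layers))   # -> ['fc_bias', 'fc']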