input_tensor=input_tensor)
    for i, layer in enumerate(base_model_imagenet.layers):
        # skip the input layer, which has no weights
        if i == 0:
            continue
        # base_model carries one extra leading layer, hence the i+1 offset
        base_model.layers[i+1].set_weights(layer.get_weights())
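As a quick check that the copy worked, the corresponding layers of both models can be compared element-wise; a minimal sketch, assuming numpy is imported as np:

# sanity check: each copied layer should now hold identical weights
# (source skips its input layer; target is offset by one extra layer)
for src, dst in zip(base_model_imagenet.layers[1:], base_model.layers[2:]):
    for w_src, w_dst in zip(src.get_weights(), dst.get_weights()):
        assert np.allclose(w_src, w_dst)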

# serialize model to JSON
base_model_json = base_model.to_json()
with open("model_base.json", "w") as json_file:
    json_file.write(base_model_json)
# serialize weights to HDF5
base_model.save_weights("model_base.h5")
print("Base model saved model to disk")

base_model.save('model_base.hdf5')
print("Model saved as hdf5")

# add a global spatial average pooling layer
top_model = base_model.output
top_model = GlobalAveragePooling2D()(top_model)
# alternatively, just flatten the feature maps
# top_model = Flatten()(top_model)

# let's add a fully-connected layer
if use_vgg:
    # only for VGG19 is a fully connected classifier added;
    # DenseNet tends to overfit when extra dense layers are used
    top_model = Dense(2048, activation='relu')(top_model)
    top_model = Dense(2048, activation='relu')(top_model)
# and a logistic layer (num_classes is a placeholder for the dataset's class count)
predictions = Dense(num_classes, activation='softmax')(top_model)
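The usual next step in this fine-tuning recipe is to assemble the full model and freeze the convolutional base so only the new top trains first; a minimal sketch, assuming the predictions tensor above:

from keras.models import Model

model = Model(inputs=base_model.input, outputs=predictions)
# freeze all pretrained layers; only the freshly added head is trainable
for layer in base_model.layers:
    layer.trainable = False
model.compile(optimizer='rmsprop', loss='categorical_crossentropy')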
Example #2
import os

import cv2
import numpy as np
from keras.applications.vgg16 import VGG16
from keras.models import load_model
from keras.optimizers import SGD

im = cv2.resize(cv2.imread('vice.jpg'), (224, 224)).astype(np.float32)
# cv2 loads BGR, which is what the caffe-style VGG weights expect;
# subtract the ImageNet per-channel (BGR) mean and keep channels-last,
# since keras.applications.VGG16 defaults to channels-last input
im -= np.array([103.939, 116.779, 123.68], dtype=np.float32)
im = np.expand_dims(im, axis=0)

# Import model
# Weights: https://gist.github.com/baraldilorenzo/07d7802847aaad0a35d3
if os.path.exists('test_modelVG.h5'):
    model = load_model('test_modelVG.h5')
else:
    model = VGG16(include_top=True,
                  weights='imagenet',
                  input_tensor=None,
                  input_shape=None,
                  pooling=None,
                  classes=1000)
    model.save('test_modelVG.h5')

# VGG_16(weights_path='weights.h5')
# compiling attaches an optimizer and loss for training; predict() alone
# does not strictly need it
optimizer = SGD()
model.compile(optimizer=optimizer, loss='categorical_crossentropy')
out = model.predict(im)

index = np.argmax(out)

# class indices sorted by ascending score
i = np.argsort(out)

# item_dict: class-index -> label mapping, assumed defined elsewhere
print("Max Prediction: " + item_dict[int(index)])
print("Other predictions in order:")
# walk the sorted indices from the back, skipping the top-1 printed above
for ind in range(1, 5):
    name = item_dict[int(i[0][-ind - 1])]
    print(name)
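If the model is the stock keras.applications VGG16 with ImageNet weights, the hand-rolled item_dict can be replaced by the bundled label decoder; a minimal sketch:

from keras.applications.vgg16 import decode_predictions

# decode_predictions maps the 1000-way softmax output to
# (class_id, class_name, probability) tuples, top-5 by default
for class_id, class_name, prob in decode_predictions(out, top=5)[0]:
    print(class_name, prob)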