# Plot ad hoc OS data instances
from utils import load_labelled_patches, load_class_names
import matplotlib.pyplot as plt
from keras import backend as K

# load 25 randomly selected 128x128 patches and their labels (we'll only plot
# the first 4 of them)
(X, y) = load_labelled_patches(["SU4010"], 128, limit=25, shuffle=True)

# load the list of possible labels
clznames = load_class_names()

# if we're using the theano backend, we need to change the indexing order from
# channels-first to channels-last so matplotlib can interpret the patches:
if K.image_dim_ordering() == 'th':
    X = X.transpose(0, 2, 3, 1)

# plot 4 images
for i in xrange(0, 4):
    plt.subplot(2, 2, i + 1).set_title(clznames[y[i].argmax()])
    plt.imshow(X[i])

# show the plot
plt.show()
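If you are unsure what the loader is handing back, it can help to print the array shapes before plotting. The shapes in the comments below are indicative only: the channel ordering depends on the Keras backend, and the labels are one-hot vectors, as the argmax above suggests.

print X.shape   # e.g. (25, 128, 128, 3) once the patches are channels-last
print y.shape   # e.g. (25, number of classes)
print clznames[y[0].argmax()]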
from resnet50 import ResNet50
from imagenet_utils import preprocess_input
from keras.models import Model
from utils import load_labelled_patches

model = ResNet50(include_top=True, weights='imagenet')

# Get input
new_input = model.input

# Find the layer to end on
new_output = model.layers[-2].output

# Build a new model
newmodel = Model(new_input, new_output)

(X, y_test_true) = load_labelled_patches(["SU4012"], 224, limit=4, shuffle=True)
X = preprocess_input(X)

features = newmodel.predict(X)
print features.shape
print features
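As a sketch of why you might want these descriptors: they can be fed straight into a simple off-the-shelf classifier. None of the code below comes from the original; scikit-learn's LinearSVC is just an assumption used for illustration, and the patch counts are arbitrary.

from sklearn.svm import LinearSVC

# grab some labelled training patches from a different tile and describe them
# with the truncated ResNet
(X_train, y_train) = load_labelled_patches(["SU4010"], 224, limit=100, shuffle=True)
train_features = newmodel.predict(preprocess_input(X_train))

# fit a linear classifier on the CNN features and score it on the four test
# patches loaded above
clf = LinearSVC()
clf.fit(train_features, y_train.argmax(axis=1))
print clf.score(features, y_test_true.argmax(axis=1))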
import keras
from keras.layers import Dropout
from keras.layers import Flatten
from keras.layers.convolutional import Convolution2D
from keras.layers.convolutional import MaxPooling2D
import matplotlib.pylab as plt
from utils import generate_labelled_patches, load_labelled_patches, load_class_names
from keras import backend as K

# define the patch size as a variable so it's easier to change later. For now,
# we'll set it to 28, just like the mnist images
patch_size = 28

# load data
train_data = generate_labelled_patches(["SU4010"], patch_size, shuffle=True)
valid_data = load_labelled_patches(["SU4011"], patch_size, subcoords=((0, 0), (300, 300)))

# load the class names
clznames = load_class_names()
num_classes = len(clznames)


class DisplayMap(keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs={}):
        # predict a class for every patch in the 300x300 validation window,
        # reshape the predictions back into an image and save it as a map
        clzs = model.predict_classes(valid_data[0])
        clzs = clzs.reshape((300 - patch_size, 300 - patch_size))
        plt.figure()
        plt.imshow(clzs)
        plt.savefig("map_epoch%s.png" % epoch)
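The callback above isn't attached to anything in this snippet. Roughly, it would be passed to training like this; this is a sketch assuming the Keras 2 fit_generator signature and a compiled Sequential model named model (the snippet itself doesn't define one), and the step counts are illustrative rather than taken from the original.

model.fit_generator(train_data,
                    steps_per_epoch=1000,
                    epochs=10,
                    validation_data=valid_data,
                    callbacks=[DisplayMap()])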
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import Flatten
from keras.layers.convolutional import Convolution2D
from keras.layers.convolutional import MaxPooling2D
import matplotlib.pylab as plt
from utils import generate_labelled_patches, load_labelled_patches, load_class_names
from keras import backend as K

# define the patch size as a variable so it's easier to change later. For now,
# we'll set it to 28, just like the mnist images
patch_size = 28

# load data
train_data = generate_labelled_patches(["SU4010"], patch_size, shuffle=True)
valid_data = load_labelled_patches(["SU4011"], patch_size, limit=1000, shuffle=True)

# load the class names
clznames = load_class_names()
num_classes = len(clznames)


def larger_model(input_shape, num_classes):
    # create model
    model = Sequential()
    model.add(Convolution2D(30, (5, 5), padding='valid', input_shape=input_shape,
                            activation='relu'))
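    # (the remainder of the function isn't shown above; the layers below are a
    #  plausible completion following the classic "larger" MNIST CNN this
    #  example is modelled on, so treat them as a sketch rather than the
    #  original definition)
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Convolution2D(15, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.2))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dense(50, activation='relu'))
    model.add(Dense(num_classes, activation='softmax'))
    # compile the model
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model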
# (continuing inside hack_resnet: replace the ResNet's 1000-way ImageNet
#  classifier with a softmax Dense layer over our map classes)
    new_output = Dense(num_classes, activation='softmax')(hidden_layer)

    # Build a new model
    newmodel = Model(new_input, new_output)

    return newmodel


model = hack_resnet(num_classes)

# the resnet expects 224x224 inputs
patch_size = 224

# load data
train_data = generate_labelled_patches(["SU4010"], patch_size, shuffle=True)
valid_data = load_labelled_patches(["SU4011"], patch_size, limit=1000, shuffle=True)

# set weights in all but the last couple of layers
# to non-trainable (weights will not be updated)
for layer in model.layers[:len(model.layers) - 2]:
    layer.trainable = False

# compile the model with a SGD/momentum optimizer
# and a very slow learning rate
model.compile(loss='categorical_crossentropy',
              optimizer=optimizers.SGD(lr=1e-4, momentum=0.9),
              metrics=['accuracy'])

# fine-tune the model
model.fit_generator(train_data,
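                    # (the remaining arguments of this call are not part of the
                    #  original snippet; the values below are an illustrative
                    #  sketch assuming the Keras 2 fit_generator signature)
                    steps_per_epoch=1000,
                    epochs=5,
                    validation_data=valid_data)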