    depth_multiplier=1, dropout=0.001, include_top=False,
    weights="imagenet", input_shape=(224, 224, 3))
model = GlobalAveragePooling2D()(base_model.output)
model = Dropout(0.001)(model)
output_layer = Dense(n_classes, activation='softmax')(model)
model = Model(base_model.input, output_layer)

# Load saved weights
model.load_weights(savepath + 'weights.h5', by_name=True)

# Make predictions
preds = model.predict(prediction_generator)
preds_class_indices = preds.argmax(axis=-1)

# Convert labels and predictions to dictionaries
labels = dict((v, k) for k, v in classes.items())
predictions = [labels[k] for k in preds_class_indices]

# Put them together with the filenames into a dataframe
filenames = prediction_generator.filenames
results = pd.DataFrame({"Filename": filenames, "Prediction": predictions})
print('')
print(results)
print('')
print('')
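This snippet assumes that prediction_generator and the classes dictionary (class name to index) were set up earlier in the script. As a rough sketch of how they might be created with a Keras ImageDataGenerator (the predict_dir path and the train_generator used for class_indices are illustrative names, not part of the original code):

from keras.preprocessing.image import ImageDataGenerator
from keras.applications.mobilenet import preprocess_input

# Illustrative setup: predict_dir is assumed to hold the images to classify
prediction_datagen = ImageDataGenerator(preprocessing_function=preprocess_input)
prediction_generator = prediction_datagen.flow_from_directory(
    predict_dir,
    target_size=(224, 224),
    batch_size=32,
    class_mode=None,   # no labels are needed at prediction time
    shuffle=False)     # keep file order so filenames line up with predictions

# classes maps class name -> index; typically taken from the training generator
classes = train_generator.class_indices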
output_layer = Dense(6, activation='softmax')(model)
model = Model(base_model.input, output_layer)

# Load saved weights
model.load_weights(savepath + 'weights.h5', by_name=True)

# Load and preprocess the image
img = image.load_img(imagepath, target_size=(224, 224))
z = image.img_to_array(img)
z = np.expand_dims(z, axis=0)
z = preprocess_input(z)

# Make prediction
preds = model.predict(z)

# Model output for the class of interest (index 0) and the last convolutional layer
maximum_model_output = model.output[:, 0]
last_conv_layer = model.layers[83]

# Pooled gradients of the last convolutional layer, evaluated on the image
grads = K.gradients(maximum_model_output, last_conv_layer.output)[0]
pooled_grads = K.mean(grads, axis=(0, 1, 2))
iterate = K.function([model.input], [pooled_grads, last_conv_layer.output[0]])
pooled_grads_value, conv_layer_output_value = iterate([z])

# Weight each of the 768 channels of the last convolutional layer by its pooled gradient
for i in range(768):
    conv_layer_output_value[:, :, i] *= pooled_grads_value[i]
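After the loop, conv_layer_output_value holds the channel activations weighted by their pooled gradients. The usual next step in Grad-CAM is to average over the channel axis, clip negative values, normalise, and overlay the result on the input image. A minimal sketch of that step, using OpenCV (the output file name is illustrative):

import cv2
import numpy as np

# Average the weighted channels into a single class-activation map
heatmap = np.mean(conv_layer_output_value, axis=-1)
heatmap = np.maximum(heatmap, 0)            # keep only positive contributions (ReLU)
heatmap /= (np.max(heatmap) + 1e-10)        # normalise to [0, 1]

# Resize to the input image, colour it, and blend it over the original
original = cv2.imread(imagepath)
heatmap = cv2.resize(heatmap, (original.shape[1], original.shape[0]))
heatmap = np.uint8(255 * heatmap)
heatmap = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)
superimposed = cv2.addWeighted(original, 0.6, heatmap, 0.4, 0)
cv2.imwrite('gradcam_overlay.jpg', superimposed)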