def classify(model, test_dir):
    """Classify all images in test_dir and report accuracy.

    :param model: Model to be evaluated; called on a [1, H, W, C] image
        batch and expected to return per-class scores.
    :param test_dir: Directory containing one sub-folder per label.
    :return: None
    """
    test_img_gen = image_generator.flow_from_directory(
        test_dir,
        target_size=(IMG_SIZE, IMG_SIZE),
        classes=LABELS,
        batch_size=1,
        shuffle=False,  # keep iteration order aligned with .filenames
    )
    # Classify every image in the folder: exactly one epoch, since
    # batch_size=1 and we draw .samples batches.
    num_test = test_img_gen.samples
    correct_count = 0
    for i in range(num_test):
        x_i, y_i = next(test_img_gen)
        y_pred = model(x_i).numpy()
        if np.argmax(y_pred[0]) == np.argmax(y_i[0]):
            correct_count += 1
        else:
            # Report misclassified images by filename for inspection.
            print(test_img_gen.filenames[i])
    accuracy = correct_count / num_test
    print(f"Evaluated on {num_test} samples.")
    print(f"Accuracy: {accuracy*100:.0f}%")
def get_bottleneck_dataset(model, img_dir, img_size):
    """Build a tf.data.Dataset of (bottleneck feature, one-hot label) pairs.

    :param model: Feature extractor; called on a [1, H, W, C] image batch
        and returning a [1, bottleneck_size] tensor.
    :param img_dir: Directory containing one sub-folder per label.
    :param img_size: Images are resized to (img_size, img_size).
    :return: Tuple of (dataset, number of samples).
    """
    # image_generator is of type ImageDataGenerator. Find more information here:
    # https://www.tensorflow.org/api_docs/python/tf/keras/preprocessing/image/ImageDataGenerator?version=stable
    train_img_gen = image_generator.flow_from_directory(
        img_dir,
        target_size=(img_size, img_size),
        shuffle=False,  # this is important for accessing filenames
        classes=LABELS,
        batch_size=1)
    feature_rows = []
    label_rows = []
    # One pass over the epoch: push every image through the base model and
    # keep its bottleneck output ([1, bottleneck_size]) alongside the
    # one-hot encoded label ([1, num_labels]).
    for _ in range(train_img_gen.samples):
        image_batch, label_batch = next(train_img_gen)
        feature_rows.append(model(image_batch))
        label_rows.append(label_batch)
    stacked = (np.vstack(feature_rows), np.vstack(label_rows))
    return tf.data.Dataset.from_tensor_slices(stacked), train_img_gen.samples
def classify(model, test_dir):
    """Classify all images in test_dir and print the accuracy.

    :param model: Model to be evaluated.
    :param test_dir: Directory including the images.
    :return: None
    """
    test_img_gen = image_generator.flow_from_directory(
        test_dir,
        target_size=(IMG_SIZE, IMG_SIZE),
        classes=LABELS,
        batch_size=1,
        shuffle=False)
    # One full epoch over the folder (batch_size=1, .samples draws).
    num_test = test_img_gen.samples
    correct_count = 0
    for idx in range(num_test):
        image_batch, label_batch = next(test_img_gen)
        scores = model(image_batch).numpy()
        predicted = np.argmax(scores[0])
        expected = np.argmax(label_batch[0])
        if predicted == expected:
            correct_count += 1
        else:
            # test_img_gen.filenames gives the datapoint's filename;
            # print the ones the model got wrong.
            print(test_img_gen.filenames[idx])
    accuracy = correct_count / num_test
    print(f"Evaluated on {num_test} samples.")
    print(f"Accuracy: {accuracy*100:.0f}%")
def get_bottleneck_dataset(model, img_dir, img_size):
    """Build a tf.data.Dataset of (bottleneck feature, one-hot label) pairs.

    :param model: Feature extractor; called on a [1, H, W, C] image batch
        and returning a [1, bottleneck_size] tensor.
    :param img_dir: Directory containing one sub-folder per label.
    :param img_size: Images are resized to (img_size, img_size).
    :return: Tuple of (dataset, number of samples).
    """
    # image_generator is of type ImageDataGenerator. Find more information here:
    # https://www.tensorflow.org/api_docs/python/tf/keras/preprocessing/image/ImageDataGenerator?version=stable
    train_img_gen = image_generator.flow_from_directory(
        img_dir,
        target_size=(img_size, img_size),
        shuffle=False,  # this is important for accessing filenames
        classes=LABELS,
        batch_size=1)
    bottelneck_x_l = []
    bottelneck_y_l = []
    for i in range(train_img_gen.samples):
        # Append the bottleneck output ([1, bottleneck_size]) and the
        # one-hot label ([1, num_labels]) for every image, one epoch total.
        x_i, y_i = next(train_img_gen)
        bottelneck_x_l.append(model(x_i))
        bottelneck_y_l.append(y_i)
    bottelneck_ds = tf.data.Dataset.from_tensor_slices(
        (np.vstack(bottelneck_x_l), np.vstack(bottelneck_y_l)))
    return bottelneck_ds, train_img_gen.samples


def retrain(image_dir):
    """Retrain a linear classifier on InceptionV3 bottleneck features.

    Extracts bottleneck features from every image under image_dir, trains a
    single softmax layer on them, then stacks the base model with the new
    classifier and saves the full model to ./trained_models/trained.h5.

    :param image_dir: Directory containing one sub-folder per label.
    :return: None
    """
    # Create the base model from the pre-trained model InceptionV3;
    # pooling='avg' makes the output a flat feature vector per image.
    base_model = tf.keras.applications.InceptionV3(
        input_shape=IMG_SHAPE,
        include_top=False,
        pooling='avg',
        weights='imagenet')
    base_model.summary()
    base_model.compile(loss='mse')

    print("Generating Bottleneck Dataset... this may take some minutes.")
    bottleneck_train_ds, num_train = get_bottleneck_dataset(
        base_model, img_dir=image_dir, img_size=IMG_SIZE)
    train_batches = bottleneck_train_ds.shuffle(10000).batch(BATCH_SIZE).repeat()
    print("Done generating Bottleneck Dataset")

    # Linear classifier that takes the bottleneck data as input.
    # 1. Size of the bottleneck tensor (last axis of the base model output).
    bottleneck_size = base_model.output.get_shape().as_list()[-1]
    # 2. A keras Input, one trainable Dense layer, and a softmax activation
    #    assembled into a new model.
    retrain_input = tf.keras.Input(shape=(bottleneck_size,))
    retrain_layer = tf.keras.layers.Dense(len(LABELS))
    retrain_activation = tf.keras.layers.Softmax()
    retrain_model = tf.keras.Model(
        inputs=retrain_input,
        outputs=retrain_activation(retrain_layer(retrain_input)))
    # 3. Loss and evaluation metric for one-hot encoded labels.
    loss = tf.keras.losses.CategoricalCrossentropy()
    metric = tf.keras.metrics.CategoricalAccuracy()
    lr = 0.01  # SGD learning rate — tune if training stalls

    retrain_model.compile(
        optimizer=tf.keras.optimizers.SGD(lr=lr),
        loss=loss,
        metrics=[metric])
    retrain_model.summary()

    EPOCHS = 1
    steps_per_epoch = 5000
    tensorboard_callback = tf.keras.callbacks.TensorBoard(
        log_dir='./retrain_logs', update_freq='batch')
    retrain_model.fit(
        train_batches,
        epochs=EPOCHS,
        steps_per_epoch=steps_per_epoch,
        callbacks=[tensorboard_callback])

    # Full model: stack the base feature extractor with the newly trained
    # classifier layers (which now hold the trained weights).
    model = tf.keras.Sequential([base_model, retrain_layer, retrain_activation])
    model.compile(loss=loss, metrics=[metric])
    maybe_makedirs('./trained_models')
    model.save('./trained_models/trained.h5')


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--image_dir', type=str)
    FLAGS, _ = parser.parse_known_args()
    retrain(FLAGS.image_dir)