def create_test_data(folder):
    test_data_path = os.path.join(folder, DATA_DIR, 'test')
    total = get_total_images(test_data_path)
    imgs = np.ndarray((total, IMAGE_ROWS, IMAGE_COLS, 3), dtype=np.uint8)
    imgs_id = np.ndarray((total,), dtype=object)
    i = 0
    print_text('Creating test images.')
    for subfolder in os.listdir(test_data_path):
        test_data_path_subfolder = os.path.join(test_data_path, subfolder, '*.jpg')
        images = glob.glob(test_data_path_subfolder)
        for image_path in images:
            # os.path.basename is portable; the original split('\\') only worked on Windows paths.
            image_name = os.path.basename(image_path)
            if 'mask' in image_name:
                continue
            imgs[i], _ = get_image_and_mask(image_path)
            imgs_id[i] = image_name.split('.')[0]
            if i % 10 == 0:
                print('Done: {0}/{1} images'.format(i, total))
            i += 1
    print_text('Loading done.')
    np.save(os.path.join(folder, 'tl_test.npy'), imgs)
    np.save(os.path.join(folder, 'tl_mask_test.npy'), imgs_id)
    print_text('Saving to .npy files done.')
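# The get_image_and_mask helper used above is not shown in this section. A minimal
# sketch of what it is assumed to do: load the image, locate a companion mask file
# (the '_mask.jpg' naming scheme is an assumption), and resize both to the
# (IMAGE_ROWS, IMAGE_COLS) shape the arrays above expect. Test images without a
# mask file would need an extra guard, omitted here for brevity.
from skimage.io import imread
from skimage.transform import resize as sk_resize


def get_image_and_mask_sketch(image_path):
    image = imread(image_path)
    mask_path = image_path.replace('.jpg', '_mask.jpg')  # assumed naming scheme
    mask = imread(mask_path)
    # preserve_range keeps the 0-255 scale instead of rescaling to [0, 1].
    image = sk_resize(image, (IMAGE_ROWS, IMAGE_COLS), preserve_range=True).astype(np.uint8)
    mask = sk_resize(mask, (IMAGE_ROWS, IMAGE_COLS), preserve_range=True).astype(np.uint8)
    return image, mask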
def create_train_data(folder, use_basic_augmentation):
    train_data_path = os.path.join(folder, DATA_DIR, 'train')
    # Each source image yields 2 samples (original + brightness) with basic
    # augmentation, or 4 (adding motion blur and brightness+blur) otherwise.
    increment = 2 if use_basic_augmentation else 4
    total = get_total_images(train_data_path) * increment // 2
    imgs = np.ndarray((total, IMAGE_ROWS, IMAGE_COLS, 3), dtype=np.uint8)
    imgs_mask = np.ndarray((total, IMAGE_ROWS, IMAGE_COLS, 3), dtype=np.uint8)
    print_text('Creating training images.')
    i = 0
    for subfolder in os.listdir(train_data_path):
        train_data_path_subfolder = os.path.join(train_data_path, subfolder, '*.jpg')
        images = glob.glob(train_data_path_subfolder)
        for image_path in images:
            # os.path.basename is portable; the original split('\\') only worked on Windows paths.
            image_name = os.path.basename(image_path)
            if 'mask' in image_name:
                continue
            imgs[i], imgs_mask[i] = get_image_and_mask(image_path)
            # Augmented copies share the original mask; previews go to TEMP_DIR.
            imgs[i + 1] = change_brightness(imgs[i])
            imgs_mask[i + 1] = imgs_mask[i]
            imsave(os.path.join(TEMP_DIR, 'brightness_' + image_name), imgs[i + 1])
            if not use_basic_augmentation:
                imgs[i + 2] = add_motion_blur(imgs[i])
                imgs_mask[i + 2] = imgs_mask[i]
                imsave(os.path.join(TEMP_DIR, 'blur_' + image_name), imgs[i + 2])
                imgs[i + 3] = add_motion_blur(imgs[i + 1])
                imgs_mask[i + 3] = imgs_mask[i]
                imsave(os.path.join(TEMP_DIR, 'both_' + image_name), imgs[i + 3])
            if i % 10 == 0:
                print('Done: {0}/{1} images'.format(i, total))
            i += increment
    print_text('Loading done.')
    np.save(os.path.join(folder, 'tl_train.npy'), imgs)
    np.save(os.path.join(folder, 'tl_mask_train.npy'), imgs_mask)
    print_text('Saving to .npy files done.')
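# change_brightness and add_motion_blur are the augmentation helpers assumed by
# create_train_data. Minimal sketches of plausible implementations follow; the
# random brightness range and blur kernel size are illustrative assumptions, not
# the project's actual values.
import random
import cv2


def change_brightness_sketch(image):
    # Scale the V channel in HSV space by a random factor, then clip back to uint8.
    hsv = cv2.cvtColor(image, cv2.COLOR_RGB2HSV).astype(np.float32)
    hsv[:, :, 2] *= random.uniform(0.4, 1.2)  # assumed brightness range
    hsv = np.clip(hsv, 0, 255).astype(np.uint8)
    return cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)


def add_motion_blur_sketch(image, kernel_size=7):  # kernel size is an assumption
    # Horizontal motion blur: averaging along the middle row of the kernel.
    kernel = np.zeros((kernel_size, kernel_size), dtype=np.float32)
    kernel[kernel_size // 2, :] = 1.0 / kernel_size
    return cv2.filter2D(image, -1, kernel)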
def train_and_predict():
    print_text('Loading and pre-processing train data.')
    images_train, images_mask_train = load_train_data()
    images_train = pre_process(images_train)
    images_mask_train = pre_process(images_mask_train)

    images_train = images_train.astype('float32')
    mean = np.mean(images_train)  # mean for data centering
    std = np.std(images_train)    # std for data normalization
    images_train -= mean
    images_train /= std

    images_mask_train = images_mask_train.astype('float32')
    images_mask_train /= 255.  # scale masks to [0, 1]

    print_text('Creating and compiling model.')
    model = get_unet(IMG_ROWS, IMG_COLS)
    model_checkpoint = ModelCheckpoint('weights.h5', monitor='val_loss', save_best_only=True)

    print_text('Fitting model.')
    model.fit(images_train, images_mask_train, batch_size=BATCH_SIZE, epochs=EPOCHS,
              verbose=1, shuffle=True, validation_split=VALID_SPLIT,
              callbacks=[model_checkpoint])

    print_text('Loading and pre-processing test data.')
    images_test, images_id_test = load_test_data()
    images_test = pre_process(images_test)
    images_test = images_test.astype('float32')
    images_test -= mean
    images_test /= std

    print_text('Loading saved weights.')
    model.load_weights('weights.h5')

    print_text('Predicting masks on test data.')
    images_mask_test = model.predict(images_test, verbose=1)
    np.save('images_mask_test.npy', images_mask_test)

    print_text('Saving predicted masks to files.')
    if not os.path.exists(PRED_DIR):
        os.mkdir(PRED_DIR)
    for image, image_id in zip(images_mask_test, images_id_test):
        image = (image[:, :, 0] * 255.).astype(np.uint8)
        imsave(os.path.join(PRED_DIR, str(image_id) + '_pred.png'), image)
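# pre_process, used by both train_and_predict variants, is assumed to resize the
# raw arrays to the network's input resolution. A minimal sketch under that
# assumption; the constant names follow the IMG_ROWS/IMG_COLS usage above, and
# preserve_range keeps pixel values on the 0-255 scale.
def pre_process_sketch(imgs, rows=IMG_ROWS, cols=IMG_COLS):
    imgs_p = np.ndarray((imgs.shape[0], rows, cols, 3), dtype=np.uint8)
    for k in range(imgs.shape[0]):
        imgs_p[k] = sk_resize(imgs[k], (rows, cols), preserve_range=True).astype(np.uint8)
    return imgs_p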
def train_and_predict(parent_folder):
    print_text('Loading and pre-processing train data.')
    imgs_train, imgs_mask_train = load_train_data(parent_folder)
    imgs_train = pre_process(imgs_train)
    imgs_mask_train = pre_process(imgs_mask_train)

    imgs_train = imgs_train.astype('float32')
    mean = np.mean(imgs_train)  # mean for data centering
    std = np.std(imgs_train)    # std for data normalization
    # Mean/std normalization is only applied to the carla data set.
    if parent_folder == "carla":
        imgs_train -= mean
        imgs_train /= std

    imgs_mask_train = imgs_mask_train.astype('float32')
    imgs_mask_train /= 255.  # scale masks to [0, 1]

    tf_board = TensorBoard(log_dir='./Graph', histogram_freq=0,
                           write_graph=True, write_images=True)

    print_text('Creating and compiling model.')
    model = unet_model(parent_folder)
    file_model_name = 'tl_model_detector_' + str(parent_folder) + '.json'
    file_weights_name = 'tl_weights_detector_' + str(parent_folder) + '.h5'
    model_checkpoint = ModelCheckpoint(os.path.join(MODEL_DIR, file_weights_name),
                                       monitor='val_loss', save_best_only=True,
                                       save_weights_only=True, verbose=1)

    # Serialize the model architecture to JSON.
    model_json = model.to_json()
    with open(os.path.join(MODEL_DIR, file_model_name), "w") as json_file:
        json_file.write(model_json)
    print_text('Saved model to disk')

    print_text('Fitting model.')
    model.fit(imgs_train, imgs_mask_train, batch_size=16, epochs=130, verbose=1,
              shuffle=True, validation_split=0.2,
              callbacks=[model_checkpoint, tf_board])

    print_text('Loading and pre-processing test data.')
    imgs_test, test_image_names = load_test_data(parent_folder)
    imgs_test = pre_process(imgs_test)
    imgs_test = imgs_test.astype('float32')
    if parent_folder == "carla":
        # Reuse the training-set statistics at test time.
        imgs_test -= mean
        imgs_test /= std

    print_text('Loading saved weights.')
    model.load_weights(os.path.join(MODEL_DIR, file_weights_name))

    print_text('Predicting masks on test data.')
    predicted_image_masks = model.predict(imgs_test, verbose=1)

    print_text('Saving predicted masks to files.')
    if not os.path.exists(PREDS_DIR):
        os.mkdir(PREDS_DIR)
    for image_mask, image_name in zip(predicted_image_masks, test_image_names):
        image_mask = (image_mask[:, :, 0] * 255.).astype(np.uint8)
        imsave(os.path.join(PREDS_DIR, image_name + '.pred.png'), image_mask)
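# load_train_data and load_test_data are assumed to simply reload the .npy files
# written by create_train_data and create_test_data above; sketches under that
# assumption (the file names match the np.save calls in those functions).
def load_train_data_sketch(folder):
    imgs = np.load(os.path.join(folder, 'tl_train.npy'))
    imgs_mask = np.load(os.path.join(folder, 'tl_mask_train.npy'))
    return imgs, imgs_mask


def load_test_data_sketch(folder):
    imgs = np.load(os.path.join(folder, 'tl_test.npy'))
    # Despite its name, tl_mask_test.npy holds the test image ids, which were
    # saved as an object array, hence allow_pickle on newer NumPy versions.
    imgs_id = np.load(os.path.join(folder, 'tl_mask_test.npy'), allow_pickle=True)
    return imgs, imgs_id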
def predict(parent_folder):
    print_text('Loading and pre-processing test data.')
    model = unet_model(parent_folder)
    test_images, test_image_names = load_test_data(parent_folder)
    test_images = pre_process(test_images)
    test_images = test_images.astype('float32')
    # Note: unlike train_and_predict, the statistics here are computed from the
    # test set itself, since no training data is loaded in this path.
    mean = np.mean(test_images)  # mean for data centering
    std = np.std(test_images)    # std for data normalization
    if parent_folder == "carla":
        test_images -= mean
        test_images /= std

    print_text('Loading saved weights.')
    model_json_name = 'tl_model_detector_' + str(parent_folder) + '.json'
    model_name = 'tl_weights_detector_' + str(parent_folder) + '.h5'
    print_text(model_json_name)
    model.load_weights(os.path.join(MODEL_DIR, model_name))

    # Serialize the model architecture to JSON.
    model_json = model.to_json()
    with open(os.path.join(MODEL_DIR, model_json_name), "w") as json_file:
        json_file.write(model_json)
    print_text('Saved model to disk')

    print_text('Predicting masks on test data.')
    predicted_image_masks = model.predict(test_images, verbose=1)

    print_text('Saving predicted masks to files.')
    if not os.path.exists(PRED_DIR):
        os.mkdir(PRED_DIR)
    for pred_image, image_name in zip(predicted_image_masks, test_image_names):
        pred_image = (pred_image[:, :, 0] * 255.).astype(np.uint8)
        imsave(os.path.join(PRED_DIR, image_name + '.pred.png'), pred_image)
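# Since both functions above serialize the architecture to JSON, a standalone
# consumer can rebuild the model without calling unet_model at all. A usage
# sketch with the standard Keras model_from_json API; note that compile() is
# only required for fit/evaluate, not for predict().
from keras.models import model_from_json


def load_detector_sketch(parent_folder):
    json_path = os.path.join(MODEL_DIR, 'tl_model_detector_' + parent_folder + '.json')
    weights_path = os.path.join(MODEL_DIR, 'tl_weights_detector_' + parent_folder + '.h5')
    with open(json_path) as json_file:
        model = model_from_json(json_file.read())
    model.load_weights(weights_path)
    return model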