def load_datasets(args):
    """Load the training and cross-validation dataset APIs.

    If ``args.matchedTraing2ndDataDir`` is empty, a single fine-tuning
    dataset pair is loaded from ``args.dataDir`` (training limited to
    ``args.numSamples`` images). Otherwise ("matched training"), full
    datasets are loaded from both directories and returned as 2-tuples.

    Args:
        args: parsed CLI arguments providing ``dataDir``,
            ``matchedTraing2ndDataDir``, ``blurImages`` and ``numSamples``.

    Returns:
        (train_dataset, crossVal_dataset) — each either a single prepared
        dataset or a ``(dataset_from_dataDir, dataset_from_2ndDir)`` tuple.
    """

    def _load(dataset, data_dir, subset):
        # Common load/prepare sequence shared by every dataset below.
        dataset.load_cvppp(data_dir, subset)
        dataset.prepare()
        return dataset

    if args.matchedTraing2ndDataDir == '':
        # Load the single fine-tuning dataset
        # (dataset API already logged in the args log step).
        train_dataset = _load(
            dataset_cvppp.Fine_Tune_CVPPP_Dataset(
                blur_images=args.blurImages, num_imgs=args.numSamples),
            args.dataDir, 'train')
        # Use all images for crossVal (no num_imgs limit).
        crossVal_dataset = _load(
            dataset_cvppp.Fine_Tune_CVPPP_Dataset(blur_images=args.blurImages),
            args.dataDir, 'crossVal')
    else:
        # Load both datasets for matched training; the first-directory
        # dataset comes first in each returned tuple.
        print("Using matched Training")
        data_dirs = (args.dataDir, args.matchedTraing2ndDataDir)
        train_dataset = tuple(
            _load(dataset_cvppp.CVPPP_Dataset(blur_images=args.blurImages),
                  data_dir, 'train')
            for data_dir in data_dirs)
        crossVal_dataset = tuple(
            _load(dataset_cvppp.CVPPP_Dataset(blur_images=args.blurImages),
                  data_dir, 'crossVal')
            for data_dir in data_dirs)

    return train_dataset, crossVal_dataset
def run_example_augmentations():
    """
    Function which can be called to visualise some example augmentations
    and ensure that they are valid.

    This uses the image loading function inside maskRCNN so the user can
    be confident in the data being fed to the network. Saves pairs of
    ``<i>_image.png`` / ``<i>_mask.png`` files into ``--outputDir``.

    Raises:
        FileExistsError: if ``--outputDir`` already exists.
    """
    parser = argparse.ArgumentParser(
        description='Visualise example augmentations')
    parser.add_argument(
        '--dataDir',
        type=str,
        required=True,
        help=
        'Directory containing training data stored in the expected format. See dataset_cvppp.py'
    )
    parser.add_argument('--outputDir',
                        type=str,
                        required=True,
                        help='Directory to save example images to')
    parser.add_argument('--numImages',
                        type=int,
                        default=30,
                        help='How many images to save')
    parser.add_argument('--blurImages', dest='blurImages', action='store_true')
    parser.add_argument('--dontBlurImages',
                        dest='blurImages',
                        action='store_false')
    parser.set_defaults(blurImages=True)
    args = parser.parse_args()

    # Create output dir. os.mkdir raises FileExistsError on its own, so no
    # pre-check is needed (the previous `assert` was stripped under `-O`
    # and was racy between the check and the mkdir).
    os.mkdir(args.outputDir)

    # Init dataset
    train_dataset = dataset_cvppp.CVPPP_Dataset()
    train_dataset.load_cvppp(args.dataDir, 'train')
    train_dataset.prepare()

    # Init config
    configuration = config_cvppp.TrainConfig()

    # Init augmentation
    augmentation = get_augmentation_sequence()

    # Generate and save augmented image/mask pairs using the same loader
    # the network training path uses.
    for i in range(args.numImages):
        image, meta, class_ids, bbox, mask = model.load_image_gt(
            train_dataset, configuration, i, augmentation=augmentation)
        rgb_mask = mask_to_rgb(mask)
        im_path = os.path.join(args.outputDir, str(i) + '_image.png')
        mask_path = os.path.join(args.outputDir, str(i) + '_mask.png')
        io.imsave(im_path, image)
        io.imsave(mask_path, rgb_mask)
        print("Saved example", i)
def evaluate_model():
    """
    The main evaluation procedure.

    Loads a trained Mask R-CNN model, runs inference over every image in
    the test split, appends per-image mAP@0.5 and Symmetric Best Dice rows
    to a results CSV, saves a prediction visualisation per image, and
    finally appends the aggregated metrics to an overall-results CSV.

    Raises:
        FileExistsError: if the output directory already exists.
        FileNotFoundError: if the weights file does not exist.
    """
    args = arguments()

    # Create output dir. os.mkdir raises FileExistsError itself, so the
    # previous `assert not os.path.isdir(...)` (stripped under `-O`) is
    # unnecessary.
    os.mkdir(args.outputDir)

    # Init config
    configuration = config_cvppp.InferenceConfig()

    # Init model and load trained weights (explicit raise instead of
    # `assert`, which is stripped under `-O`).
    inference_model = model.MaskRCNN(mode="inference",
                                     config=configuration,
                                     model_dir=args.outputDir)
    if not os.path.exists(args.weightsPath):
        raise FileNotFoundError("Weights file does not exist at " +
                                args.weightsPath)
    inference_model.load_weights(args.weightsPath, by_name=True)

    # Load dataset API
    test_dataset = dataset_cvppp.CVPPP_Dataset()
    if os.path.isdir(os.path.join(args.dataDir, 'test')):
        test_dataset.load_cvppp(args.dataDir, 'test')
    else:
        # Assume it is just this directory
        test_dataset.load_cvppp(args.dataDir, '')
    test_dataset.prepare()

    # Init metric trackers. (The original also created DiC / abs_DiC
    # trackers, but they were never updated or reported — removed.)
    dice = MetricTracker()
    mAP = MetricTracker()

    # Save per-image predictions
    with open(os.path.join(args.outputDir, RESULTS_FILENAME),
              'a') as results_file:
        results_file.write("Filename, Path, mAP, SBD \n")
        for image_id in test_dataset.image_ids:
            # Activate the choice of image
            # NOTE(review): image_reference appears to be called here for
            # its side effect before load_image_gt — confirm against the
            # dataset implementation.
            test_dataset.image_reference(image_id)
            image, image_meta, gt_class_id, gt_bbox, gt_mask = \
                model.load_image_gt(test_dataset, configuration, image_id,
                                    use_mini_mask=False)
            image_path = test_dataset.image_reference(image_id)['path']

            # Run inference
            results = inference_model.detect([image], verbose=False)
            r = results[0]
            results_file.write(
                os.path.basename(image_path) + ', ' + image_path + ', ')

            # mAP @ IoU 0.5 metric (precisions/recalls/overlaps unused)
            mAPResult, _precisions, _recalls, _overlaps = \
                utils.compute_ap(gt_bbox, gt_class_id, gt_mask,
                                 r["rois"], r["class_ids"], r["scores"],
                                 r['masks'])
            mAP.add(mAPResult, 0)
            results_file.write(str(mAPResult)[:6] + ', ')

            # Dice metric
            dice_res, dice_std = CvpppSymmetricBestDice.calculate(
                r['masks'], gt_mask)
            dice.add(dice_res, dice_std)
            results_file.write(str(dice_res)[:6] + '\n')

            # Save visualisation of prediction
            save_path = os.path.join(args.outputDir,
                                     os.path.basename(image_path))
            visualise_prediction(image, r['masks'], gt_mask, save_path)

    # Save overall results
    with open(os.path.join(args.outputDir, OVERALL_RESULTS_FILENAME),
              'a') as results_file:
        results_file.write("Metric, Result, STD\n")
        overall_mAP = mAP.calc_mean()
        results_file.write("mAP, " + str(overall_mAP[0])[:6] + ', ' +
                           str(overall_mAP[1])[:6] + '\n')
        overall_dice = dice.calc_mean()
        results_file.write("Symmetric Best Dice, " + str(overall_dice[0])[:6] +
                           ', ' + str(overall_dice[1])[:6] + '\n')