def predict_file(self, csv_file, root_dir, savedir=None):
    """Run prediction over every image referenced in an annotation csv file.

    The csv file must contain the columns "image_path", "xmin", "ymin",
    "xmax", "ymax" — one bounding box per row. "image_path" is a filename
    relative to root_dir, not an absolute path.

    Args:
        csv_file: path to the annotation csv file
        root_dir: directory containing the images. If none, uses "image_dir" in config
        savedir: Optional. Directory to save image plots.

    Returns:
        df: pandas dataframe with bounding boxes, label and scores for each
            image in the csv file
    """
    # Inference mode: disable dropout/batch-norm updates.
    self.model.eval()

    predictions = predict.predict_file(
        model=self.model,
        csv_file=csv_file,
        root_dir=root_dir,
        savedir=savedir,
        device=self.device,
        iou_threshold=self.config["nms_thresh"],
    )

    # Map numeric class ids back to their character labels.
    to_label = lambda x: self.numeric_to_label_dict[x]
    predictions["label"] = predictions.label.apply(to_label)

    return predictions
def predict_file(self, csv_file, root_dir, savedir=None, color=None, thickness=1):
    """Run prediction over every image referenced in an annotation csv file.

    The csv file must contain the columns "image_path", "xmin", "ymin",
    "xmax", "ymax" — one bounding box per row. "image_path" is a filename
    relative to root_dir, not an absolute path.

    Args:
        csv_file: path to the annotation csv file
        root_dir: directory containing the images. If none, uses "image_dir" in config
        savedir: Optional. Directory to save image plots.
        color: color of the bounding box as a tuple of BGR color,
            e.g. orange annotations is (0, 165, 255)
        thickness: thickness of the rectangle border line in px

    Returns:
        df: pandas dataframe with bounding boxes, label and scores for each
            image in the csv file
    """
    # Move to the active device and switch to inference mode before predicting.
    self.model = self.model.to(self.current_device)
    self.model.eval()
    self.model.score_thresh = self.config["score_thresh"]

    predictions = predict.predict_file(
        model=self.model,
        csv_file=csv_file,
        root_dir=root_dir,
        savedir=savedir,
        device=self.current_device,
        iou_threshold=self.config["nms_thresh"],
        color=color,
        thickness=thickness,
    )

    # Translate numeric class ids back to character labels.
    predictions["label"] = predictions.label.apply(
        lambda numeric: self.numeric_to_label_dict[numeric])

    return predictions
def evaluate(self, csv_file, root_dir, iou_threshold=None, savedir=None):
    """Compute intersection-over-union and precision/recall for a given iou_threshold

    Args:
        csv_file: location of a csv file with columns "name","xmin","ymin","xmax","ymax","label", each box in a row
        root_dir: location of files in the dataframe 'name' column.
        iou_threshold: float [0,1] intersection-over-union union between annotation and prediction to be scored true positive
        savedir: optional path dir to save evaluation images
    Returns:
        results: dict of ("results", "precision", "recall") for a given threshold
    """
    # FIX: previously the model was moved to "cuda" whenever CUDA was
    # available, while prediction below runs with device=self.current_device.
    # If current_device was CPU the model and inputs ended up on different
    # devices. Move the model to current_device instead, matching
    # predict_file's behavior.
    self.model = self.model.to(self.current_device)
    self.model.eval()
    self.model.score_thresh = self.config["score_thresh"]

    predictions = predict.predict_file(
        model=self.model,
        csv_file=csv_file,
        root_dir=root_dir,
        savedir=savedir,
        device=self.current_device,
        iou_threshold=self.config["nms_thresh"])

    ground_df = pd.read_csv(csv_file)
    # Ground truth labels are characters; convert to numeric ids so they
    # compare against the model's numeric predictions.
    ground_df["label"] = ground_df.label.apply(lambda x: self.label_dict[x])

    # Remove empty samples (rows encoded with xmin == xmax == 0) from ground truth
    ground_df = ground_df[~((ground_df.xmin == 0) & (ground_df.xmax == 0))]

    # If no arg for iou_threshold, fall back to the configured validation value
    if iou_threshold is None:
        iou_threshold = self.config["validation"]["iou_threshold"]

    results = evaluate_iou.evaluate(predictions=predictions,
                                    ground_df=ground_df,
                                    root_dir=root_dir,
                                    iou_threshold=iou_threshold,
                                    savedir=savedir)

    # Replace numeric classes with character labels if results are not empty;
    # predicted_label may be NaN for unmatched ground truth boxes.
    if not results["results"].empty:
        results["results"]["predicted_label"] = results["results"][
            "predicted_label"].apply(lambda x: self.numeric_to_label_dict[
                x] if not pd.isnull(x) else x)
        results["results"]["true_label"] = results["results"][
            "true_label"].apply(lambda x: self.numeric_to_label_dict[x])
        results["predictions"] = predictions

    return results
def log_images(self, pl_module):
    """Predict the configured csv file and upload saved plot images to the logger.

    Runs prediction (plots are written to self.savedir by predict_file), then
    best-effort uploads every saved .png to the lightning module's logger.
    Logger upload failures are swallowed deliberately — the images remain on
    disk either way.

    Args:
        pl_module: a pytorch-lightning module exposing .model, .device and
            (optionally) .logger.experiment.log_image
    """
    boxes = predict.predict_file(model=pl_module.model,
                                 csv_file=self.csv_file,
                                 root_dir=self.root_dir,
                                 savedir=self.savedir,
                                 device=pl_module.device)

    saved_plots = glob.glob("{}/*.png".format(self.savedir))
    try:
        # Best-effort upload: not every logger backend implements log_image.
        for x in saved_plots:
            pl_module.logger.experiment.log_image(x)
    except Exception as e:
        # FIX: corrected "rasied" -> "raised" in the user-facing message.
        print(
            "Could not find logger in lightning module, skipping upload, images were saved to {}, error was raised {}"
            .format(self.savedir, e))
def evaluate(self, csv_file, root_dir, iou_threshold=None, show_plot=False, savedir=None):
    """Score predictions against ground truth at a given IoU threshold.

    Args:
        csv_file: a csv with columns "name","xmin","ymin","xmax","ymax","label",
            one box per row
        root_dir: location of files in the dataframe 'name' column.
        iou_threshold: float [0,1] intersection-over-union between annotation
            and prediction required to count a true positive
        show_plot: open a blocking matplotlib window to show plot and
            annotations, useful for debugging.
        savedir: optional path dir to save evaluation images

    Returns:
        results: dict of ("results", "precision", "recall") for a given threshold
    """
    self.model.eval()

    # Keep the model on CPU unless a non-CPU device is configured.
    if self.device.type != "cpu":
        self.model = self.model.to(self.device)

    predictions = predict.predict_file(
        model=self.model,
        csv_file=csv_file,
        root_dir=root_dir,
        savedir=savedir,
        device=self.device,
        iou_threshold=self.config["nms_thresh"])

    # Convert the model's numeric class ids to character labels.
    predictions["label"] = predictions.label.apply(
        lambda numeric: self.numeric_to_label_dict[numeric])

    ground_df = pd.read_csv(csv_file)

    # Default the threshold from config when the caller did not supply one.
    if iou_threshold is None:
        iou_threshold = self.config["validation"]["iou_threshold"]

    return evaluate_iou.evaluate(predictions=predictions,
                                 ground_df=ground_df,
                                 root_dir=root_dir,
                                 iou_threshold=iou_threshold,
                                 show_plot=show_plot)