def _process_slide(self, slide: Slide):
    slide_path = os.path.join(self.output_path, slide.name)
    slide.slide_path = slide_path
    os.mkdir(slide_path)

    # Draw the slide image.
    image = slide.image
    image_path = os.path.join(slide_path, "image.png")
    Logger.log_special("Scanning {}".format(slide.name), with_gap=True)

    # Equalize the image and predict the cell masks from it.
    equalizer_image = self.equalizer.create_equalized_image(image)
    predict_image = equalizer_image

    # Get the sample prediction.
    prediction = self.net.cycle_predict(predict_image, None)
    slide.cells = self._process_prediction(slide, slide_path, prediction)
    self._draw_prediction_mask(image, slide_path, prediction)

    # Produce the summary report and save the equalized image.
    pather.create("output/summary")
    self.reporter.produce(slide, "output/summary")
    cv2.imwrite(image_path, equalizer_image)
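
# --- Hedged usage sketch (illustrative, not from the original source) ---
# One way _process_slide might be driven for a whole batch. The `process_all`
# method name and the idea that the caller supplies an iterable of Slide
# objects are assumptions; the class constructor (which sets up output_path,
# equalizer, net and reporter) is not shown here.
def process_all(self, slides):
    for slide in slides:
        self._process_slide(slide)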
def display_stats(instances: Dict[str, int],
                  title: str = "SOMETHING",
                  file_name: str = "graph_name",
                  n_display: int = 20):
    # Sort the class counts in descending order.
    sorted_instances = sorted(instances.items(), key=lambda kv: kv[1])
    sorted_instances.reverse()

    Logger.log_special(title, with_gap=True)
    for i in range(n_display):
        Logger.log_field(
            loader.get_label(sorted_instances[i][0]).upper(),
            sorted_instances[i][1])

    # Create the figure.
    plt.style.use("ggplot")
    fig, ax = plt.subplots(figsize=(15, 8))
    short_instances = sorted_instances[:n_display]

    # Labels.
    y_label = [loader.get_label(s[0]) for s in short_instances]
    y_label.insert(0, "(OTHERS)")
    y = np.arange(len(y_label))

    # Values.
    x = [s[1] for s in short_instances]
    x.insert(0, sum([s[1] for s in sorted_instances[n_display:]]))
    c_map = plt.get_cmap("plasma")
    colors = c_map(1 - y / len(y))
    colors[0] = (0.7, 0.7, 0.7, 1.0)

    # Plot the graph.
    plt.barh(y, x, height=0.5, color=colors)
    ax.set_yticks(y)
    ax.set_yticklabels(y_label)
    ax.invert_yaxis()
    ax.set_title(f"{title}: ({len(samples)} Images)")
    ax.set_xlabel("Count")
    ax.set_ylabel("Class Name")
    plt.savefig(f"{settings.OUTPUT_DIRECTORY}/{file_name}.png")
    plt.clf()
__author__ = "Jakrin Juangbhanich"
__email__ = "*****@*****.**"

# body_labels = ['MAN', 'WOMAN', 'PERSON', 'GIRL', 'BOY']
body_labels = ['/m/04yx4', '/m/03bt1vf', '/m/01g317', '/m/05r655', '/m/01bl7v']

# face_labels = ['HUMAN FACE', 'HUMAN HEAD']
face_labels = ['/m/0dzct', '/m/04hgtk']

# car_labels = ['Land vehicle']
car_labels = ['/m/01prls']

if __name__ == "__main__":

    # Load the project settings and required modules.
    Logger.log_special("Running Sample Analysis", with_gap=True)
    settings = ProjectSettings("settings.yaml")

    # Load the class labels.
    loader = Loader()
    loader.load_labels(settings.LABELS_FILE)

    # Get ALL of the samples in the directory.
    samples = []
    sample_files = os.listdir(settings.SAMPLES_DIRECTORY)
    for i in sample_files[:20]:
        file_path = os.path.join(settings.SAMPLES_DIRECTORY, i)
        samples += Loader.load_sample_set_from_file(file_path)

    class_instances = {}
    class_appearances = {}
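
    # --- Hedged sketch (illustrative, not from the original source) ---
    # One plausible way to fill the two dictionaries declared above and then
    # render the charts with display_stats. The `label` attribute on each
    # detect_region is an assumption made for illustration; substitute the
    # real field name used by the project's region class.
    for sample in samples:
        seen_in_sample = set()
        for region in sample.detect_regions:
            label_id = region.label  # assumed attribute name
            class_instances[label_id] = class_instances.get(label_id, 0) + 1
            seen_in_sample.add(label_id)
        for label_id in seen_in_sample:
            class_appearances[label_id] = class_appearances.get(label_id, 0) + 1

    display_stats(class_instances, title="Class Instances", file_name="class_instances")
    display_stats(class_appearances, title="Class Appearances", file_name="class_appearances")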
__author__ = "Jakrin Juangbhanich"
__email__ = "*****@*****.**"

# This is the maximum number of samples that a single 'set' will contain.
MAX_SAMPLE_SET_SIZE = 5000

# Remote URLs.
REMOTE_IMAGE_URL_FILE = "https://requestor-proxy.figure-eight.com/figure_eight_datasets/open-images/train-images" \
                        "-boxable.csv"
REMOTE_GROUND_TRUTH_FILE = "https://requestor-proxy.figure-eight.com/figure_eight_datasets/open-images/train" \
                           "-annotations-bbox.csv"

if __name__ == "__main__":

    # Load the project settings and required modules.
    Logger.log_special("Running Sample Creator", with_gap=True)
    settings = ProjectSettings("settings.yaml")
    loader: Loader = Loader()

    # Read in the source data, and create our own sample data.
    Logger.log_special("Begin Sample Initialization", with_gap=True)
    loader.check_and_load(settings.IMAGE_URL_FILE, REMOTE_IMAGE_URL_FILE)
    samples = loader.create_samples(settings.IMAGE_URL_FILE)

    # Now that we have sample IDs and URLs, we can associate them with the GT annotations.
    Logger.log_special("Begin Sample Association", with_gap=True)
    loader.check_and_load(settings.GROUND_TRUTH_FILE, REMOTE_GROUND_TRUTH_FILE)
    loader.associate_boxes_with_samples(samples, settings.GROUND_TRUTH_FILE)

    # Exporting the created samples.
    Logger.log_special("Begin Sample Export", with_gap=True)
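
    # --- Hedged sketch (illustrative, not from the original source) ---
    # A minimal export loop that splits the samples into sets of at most
    # MAX_SAMPLE_SET_SIZE. The Loader method `save_sample_set_to_file` is an
    # assumption made for illustration; the file name pattern mirrors the
    # `sample_set_{i}.json` convention used by the download script.
    for i in range(0, len(samples), MAX_SAMPLE_SET_SIZE):
        sample_set = samples[i:i + MAX_SAMPLE_SET_SIZE]
        set_index = i // MAX_SAMPLE_SET_SIZE
        set_path = os.path.join(settings.SAMPLES_DIRECTORY, f"sample_set_{set_index}.json")
        Loader.save_sample_set_to_file(sample_set, set_path)  # assumed API
        Logger.log_field(f"Exported Set {set_index}", len(sample_set))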
parser.add_argument("-n", "--sample_count", default=50, type=int, help="How many do we want to visualize?") return parser.parse_args() args = get_args() set_index = args.set_index sample_count = args.sample_count if __name__ == "__main__": # Load the project settings and required modules. Logger.log_special("Running Sample Loader", with_gap=True) settings = ProjectSettings("settings.yaml") # Load the label mapping. loader = Loader() loader.load_labels(settings.LABELS_FILE) Logger.log_field("Labels Loaded", len(loader.label_map)) # Load the samples from the set that we want. samples = Loader.load_sample_set(set_index) loaded_samples = [ s for s in samples if (s.is_locally_loaded and len(s.detect_regions) > 0) ] # How many samples loaded?
parser.add_argument("-m", "--max_threads", default=5, type=int, help="Max threads to use for loading.") return parser.parse_args() args = get_args() set_index = args.set_index max_threads = args.max_threads if __name__ == "__main__": # Load the project settings and required modules. Logger.log_special("Running Sample Loader", with_gap=True) settings = ProjectSettings("settings.yaml") set_path = os.path.join(settings.SAMPLES_DIRECTORY, f"sample_set_{set_index}.json") if not os.path.exists(set_path): Logger.log_field( "Error", "No file found at {}. Have you created the samples using cmd_create_samples yet?" ) exit(1) Logger.log_special("Begin Sample Image Download", with_gap=True) samples = Loader.load_sample_set_from_file(set_path) unloaded_samples = [s for s in samples if not s.is_locally_loaded] n_unloaded_samples = len(unloaded_samples)