def visualize_detection(path_to_image: str):
    """
    Visualizes object-detections and corresponding acceptance probability
    predictions for an image.

    Args:
        path_to_image: Image on which detections and predictions will be
            visualized.
    """
    # 'detections' keeps the full response; a per-box loop variable must not
    # shadow it (the original reused the name 'det' for both).
    detections = send_od_request(path_to_image)
    pred = accept_prob_predictor.main('predict', detections=detections)
    image_id = os.path.splitext(os.path.basename(path_to_image))[0]
    img = Image.open(path_to_image)
    colors = ['blue', 'green', 'red', 'orange', 'brown', 'black', 'turquoise']
    width, height = img.size
    draw = ImageDraw.Draw(img)
    for i, det in enumerate(detections[image_id]):
        # Cycle through the palette so every box stays distinguishable.
        color = colors[i % len(colors)]
        # Box coordinates come in normalized [0, 1] — scale to pixels.
        x_min = det['XMin'] * width
        y_min = det['YMin'] * height
        x_max = det['XMax'] * width
        y_max = det['YMax'] * height
        draw.text(xy=(x_min, y_min), text=det['LabelName'], fill=color)
        # Acceptance prediction is rendered at the box center.
        draw.text(xy=((x_min + x_max) / 2, (y_min + y_max) / 2),
                  text=str(int(pred[i])), fill=color)
        # Draw three 1px rectangles with small offsets to fake a thicker border.
        for offset in [-1, 0, 1]:
            draw.rectangle(
                xy=[x_min + offset, y_min + offset,
                    x_max + offset, y_max + offset],
                outline=color)
    img.show()
def update_predictions(batch_id):
    """
    Creates and returns predictions for all images contained in a batch whether
    predictions already exist in the instance directory or not.

    Args:
        batch_id: id of the batch for which the predictions will be generated.

    Returns:
        A JSON response with one ``{'id', 'predictions'}`` entry per task.
    """
    predictions = []
    batch = ImageBatch.query.filter_by(id=batch_id).all()
    batch_data = image_batch_schema.dump(batch, many=True)
    # Guard: an unknown batch id yields an empty dump; the original indexed
    # batch_data[0] unconditionally and raised IndexError here.
    if not batch_data:
        return jsonify(predictions)
    for task in batch_data[0]['tasks']:
        img_path = get_path_to_image(task['id'])
        pred_path = get_path_to_prediction(task['id'])
        prediction = send_od_request(img_path)
        # The OD service keys its response by image id; take the single entry.
        prediction = list(prediction.values())[0]
        if prediction:
            feature_vectors = [compute_feature_vector(p) for p in prediction]
            acceptance_prediction = send_accept_prob_request(feature_vectors)
            for i, p in enumerate(acceptance_prediction):
                prediction[i]['acceptance_prediction'] = p
            # Most likely accepted detections first.
            prediction.sort(key=lambda p: p['acceptance_prediction'],
                            reverse=True)
        predictions.append({'id': str(task['id']), 'predictions': prediction})
        # makedirs(exist_ok=True) replaces the racy exists()+mkdir() pair and
        # also creates missing intermediate directories.
        os.makedirs(os.path.dirname(pred_path), exist_ok=True)
        # Cache the prediction on disk so serve_predictions() can reuse it.
        with open(pred_path, 'w') as f:
            json.dump(prediction, f)
    return jsonify(predictions)
def serve_predictions():
    """
    Serves predictions for all images from the instance folder.

    Cached predictions are read from disk; missing ones are generated via the
    object-detection and acceptance-probability services and then cached.

    Returns:
        A JSON response with one ``{'id', 'predictions'}`` entry per task.
    """
    predictions = []
    img_batches = ImageBatch.query.options(db.joinedload('tasks')).all()
    image_batch_data = image_batch_schema.dump(img_batches, many=True)
    for batch in image_batch_data:
        for task in batch['tasks']:
            img_path = get_path_to_image(task['id'])
            pred_path = get_path_to_prediction(task['id'])
            if os.path.exists(pred_path):
                # Fast path: reuse the prediction cached on disk.
                with open(pred_path, 'r') as f:
                    predictions.append({'id': str(task['id']),
                                        'predictions': json.load(f)})
            else:
                prediction = send_od_request(img_path)
                # The OD service keys its response by image id.
                prediction = list(prediction.values())[0]
                if prediction:
                    feature_vectors = [compute_feature_vector(p)
                                       for p in prediction]
                    acceptance_prediction = send_accept_prob_request(
                        feature_vectors)
                    for i, p in enumerate(acceptance_prediction):
                        prediction[i]['acceptance_prediction'] = p
                    # Most likely accepted detections first.
                    prediction.sort(key=lambda p: p['acceptance_prediction'],
                                    reverse=True)
                predictions.append({'id': str(task['id']),
                                    'predictions': prediction})
                # makedirs(exist_ok=True) replaces the racy exists()+mkdir()
                # pair and creates missing intermediate directories too.
                os.makedirs(os.path.dirname(pred_path), exist_ok=True)
                with open(pred_path, 'w') as f:
                    json.dump(prediction, f)
    return jsonify(predictions)
def compute_map(path_to_test_images: str, path_to_gt: str, map_at: int):
    """
    Computes the mAP at a given value for a given test set and the current
    state of the object detector. (modify alpha in settings if necessary)

    Args:
        path_to_test_images: test images
        path_to_gt: Ground-Truth-Data for the test images
        map_at: decides at which value the mAP is computed

    Returns:
        The mean average precision over all evaluated examples, or 0.0 when
        there are no examples (the original divided by zero in that case).
    """
    gt_reader = GroundTruthReader(path_to_gt)
    oid_classcode_reader = OIDClassCodeReader()
    mAP = 0
    nr_examples = 0
    for image in os.listdir(path_to_test_images):
        image_id = os.path.splitext(image)[0]
        ground_truth = gt_reader.get_ground_truth_annotation(image_id)
        detections = send_od_request(os.path.join(path_to_test_images, image))
        for d in detections:
            nr_examples += 1
            correct = 0
            # Only the top `map_at` detections count towards precision@k.
            for single_det in detections[d][:map_at]:
                oid_class_code = (
                    oid_classcode_reader.get_code_for_human_readable_class(
                        single_det['LabelName']))
                # Compare against ground-truth boxes of the same class only.
                for g in [gt for gt in ground_truth
                          if gt['LabelName'] == oid_class_code]:
                    gt_bb = {
                        'XMin': g['XMin'],
                        'YMin': g['YMin'],
                        'XMax': g['XMax'],
                        'YMax': g['YMax']
                    }
                    det_bb = {
                        'XMin': single_det['XMin'],
                        'YMin': single_det['YMin'],
                        'XMax': single_det['XMax'],
                        'YMax': single_det['YMax']
                    }
                    iou = compute_iou(gt_bb, det_bb)
                    # alpha is the project-wide IoU acceptance threshold.
                    if iou > alpha:
                        correct += 1
                        break
            mAP += correct / map_at
    # Guard against an empty test directory / no detections at all.
    if nr_examples == 0:
        return 0.0
    return mAP / nr_examples
def create_detection_record(path_to_images: str, path_to_json=None):
    """
    Create and save detections from an object-detection model for a set of
    images.

    Args:
        path_to_images: Path to a directory which contains the images to be
            analysed.
        path_to_json: Path for the new file or to an existent json to which
            the detections will be appended.
    """
    if path_to_json is None:
        # Fresh record: evaluate every image and write to a timestamped file.
        images = os.listdir(path_to_images)
        result = {}
        timestamp = datetime.now().strftime('%Y_%m_%d_%H%M%S')
        path_to_json = os.path.join(
            annotation_predictor_metadata_dir,
            'detection_record_{}.json'.format(timestamp))
        with open(path_to_json, 'w') as f:
            json.dump(result, f)
    else:
        # Resume: load the existing record and only evaluate images that are
        # not in it yet.
        with open(path_to_json, 'r') as f:
            result = json.load(f)
        images = []
        for image in os.listdir(path_to_images):
            image_id = os.path.splitext(image)[0]
            if image_id not in result.keys():
                images.append(image)
    total_images = len(images)
    for i, image in enumerate(images):
        # Checkpoint every 100 images so progress survives a crash.
        if (i % 100) == 0:
            with open(path_to_json, 'w') as f:
                json.dump(result, f)
            print('Evaluated {} of {} images'.format(i, total_images))
        path_to_image = os.path.join(path_to_images, image)
        try:
            # Context manager releases the file handle; the bare Image.open()
            # in the original leaked it. Conversion only validates the image.
            with Image.open(path_to_image) as img:
                img.convert('RGB')
        except (IOError, OSError):
            # Skip unreadable / corrupt images.
            continue
        result.update(send_od_request(path_to_image))
    # Final flush: the original only saved every 100 images, silently losing
    # the detections of the last (up to 99) processed images.
    with open(path_to_json, 'w') as f:
        json.dump(result, f)
    print('Evaluated {} of {} images'.format(total_images, total_images))