def _get(self):
    """Build one training sample at the internal cursor and advance it.

    Returns a 4-tuple: the normalized image followed by the three
    per-scale target tensors (largest scale index first).
    """
    net_size = self._net_size
    # Read the annotation for the current sample.
    fname, boxes, coded_labels = parse_annotation(
        self.ann_fnames[self._index], self.img_dir, self.lable_names)
    # Load the image at the fixed network input size (with jitter).
    augmenter = ImgAugment(net_size, net_size, self.jitter)
    img, aug_boxes = augmenter.imread(fname, boxes)
    # Encode every ground-truth box into the target of its matching scale.
    list_ys = _create_empty_xy(net_size, len(self.lable_names))
    for box, label in zip(aug_boxes, coded_labels):
        anchor, scale_idx, box_idx = _find_match_anchor(box, self.anchors)
        coded = _encode_box(list_ys[scale_idx], box, anchor,
                            net_size, net_size)
        _assign_box(list_ys[scale_idx], box_idx, coded, label)
    # Advance the cursor; wrap and flag end-of-epoch after the last file.
    self._index += 1
    if self._index == len(self.ann_fnames):
        self._index = 0
        self._end_epoch = True
    return normalize(img), list_ys[2], list_ys[1], list_ys[0]
def get(self, i):
    """Build the training sample for index ``i`` (wrapped modulo dataset size).

    Returns a 4-tuple: the normalized image followed by the three
    per-scale target tensors (largest scale index first).
    """
    idx = i % len(self.ann_fnames)
    # Read the annotation for the requested sample.
    fname, boxes, coded_labels = parse_annotation(
        self.ann_fnames[idx], self.img_dir, self.lable_names)
    # Fixed network size (multi-scale sizing is not used here).
    net_size = self.min_net_size
    list_ys = _create_empty_xy(net_size, len(self.lable_names))
    # Load the image at the fixed network input size (with jitter).
    augmenter = ImgAugment(net_size, net_size, self.jitter)
    img, aug_boxes = augmenter.imread(fname, boxes)
    # Encode every ground-truth box into the target of its matching scale.
    for box, label in zip(aug_boxes, coded_labels):
        anchor, scale_idx, box_idx = _find_match_anchor(box, self.anchors)
        coded = _encode_box(list_ys[scale_idx], box, anchor,
                            net_size, net_size)
        _assign_box(list_ys[scale_idx], box_idx, coded, label)
    return normalize(img), list_ys[2], list_ys[1], list_ys[0]
def _parseAnnotation(annFile, labelNames):
    """Parse one annotation file, keeping only boxes with a known label.

    Labels not present in ``labelNames`` are coded as -1 by
    ``parse_annotation`` and are dropped here together with their boxes.
    """
    itemDir = os.path.split(annFile)[0]
    imageFile, boxes, labels = parse_annotation(annFile, itemDir, labelNames)
    # Filter out boxes whose label was not recognized (coded -1).
    kept = ((box, lab) for box, lab in zip(boxes, labels) if lab > -1)
    boxes, labels = unzip(kept)
    return imageFile, boxes, labels
def create_generator(image_dir, annotation_dir):
    """Create a non-shuffling BatchGenerator over 'raccoon' annotations.

    Uses a fixed 288px network size (min == max disables multi-scale).
    """
    from yolo.dataset.annotation import parse_annotation
    train_anns = parse_annotation(annotation_dir, image_dir,
                                  labels_naming=["raccoon"])
    # Anchor (width, height) pairs in pixels, three per output scale.
    anchors = [17, 18, 28, 24, 36, 34, 42, 44, 56,
               51, 72, 66, 90, 95, 92, 154, 139, 281]
    return BatchGenerator(train_anns,
                          anchors=anchors,
                          min_net_size=288,
                          max_net_size=288,
                          shuffle=False)
def run(self, threshold=0.5, save_dname=None):
    """Evaluate the detector over all annotation files and return a score.

    Args:
        threshold: detection confidence threshold passed to the detector.
        save_dname: optional directory; when set, annotated images are saved.

    Returns:
        The value of ``calc_score`` over accumulated TP/truth/prediction
        counts.
    """
    n_true_positives = 0
    n_truth = 0
    n_pred = 0
    for ann_fname in tqdm(self._ann_fnames):
        img_fname, true_boxes, true_labels = parse_annotation(
            ann_fname, self._img_dname, self._cls_labels)
        true_labels = np.array(true_labels)
        # cv2 loads BGR; reverse the channel axis to get RGB.
        image = cv2.imread(img_fname)[:, :, ::-1]
        boxes, labels, probs = self._detector.detect(image, threshold)
        # Accumulate counts for the final precision/recall-style score.
        n_true_positives += count_true_positives(boxes, true_boxes,
                                                 labels, true_labels)
        n_truth += len(true_boxes)
        n_pred += len(boxes)
        if save_dname:
            self._save_img(save_dname, img_fname, image,
                           boxes, labels, probs)
    return calc_score(n_true_positives, n_truth, n_pred)
def copyImageAndAnnotations(annotationFiles, imagesDir, dstDir):
    """Copy each annotation file and its referenced image into ``dstDir``.

    ``dstDir`` is recreated (wiped) first via ``recreateDir``.
    """
    recreateDir(dstDir)
    for annFile in annotationFiles:
        # parse_annotation resolves the image path from the annotation;
        # labels are not needed here, hence the empty label list.
        imgFile, _, _ = parse_annotation(annFile, imagesDir, [])
        shutil.copy(imgFile, dstDir)
        shutil.copy(annFile, dstDir)