def _read_annotations(self):
    """Parse the space/comma CSV annotations file into per-image box lists.

    Each line of ``self.csv_data_file`` is expected to look like::

        img_path x1,y1,x2,y2,class_id [x1,y1,x2,y2,class_id ...]

    Returns:
        OrderedDict mapping image file path -> list of dicts with keys
        'x1', 'y1', 'x2', 'y2' (ints) and 'class' (name looked up in
        ``self.class_names``). Images with no boxes map to an empty list.

    Raises:
        ValueError: if a box has extra fields or non-integer coordinates
        (the caller wraps this as an "invalid CSV annotations file" error).
    """
    result = OrderedDict()
    for annotation in read_lines(self.csv_data_file):
        rows = annotation.split(' ')
        img_file = rows[0]
        if img_file not in result:
            result[img_file] = []
        for box in rows[1:]:
            # Split first, then validate the FIELD count. The previous
            # guard (`len(box) < 5`) measured the raw string's character
            # length, so e.g. "12345" (1 field, 5 chars) slipped through
            # and crashed the unpack, while truly short boxes were only
            # skipped by accident of their string length.
            fields = box.split(',')
            if len(fields) < 5:
                continue
            # Unpacking still raises ValueError for >5 fields, matching
            # the original strictness for over-long entries.
            x1, y1, x2, y2, class_name = fields
            x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
            class_name = self.class_names[int(class_name)]
            result[img_file].append({'x1': x1, 'x2': x2, 'y1': y1, 'y2': y2, 'class': class_name})
    print('read annotations done')
    return result
def __init__(self, csv_data_file, csv_class_file, base_dir=None, **kwargs):
    """Initialize a CSV data generator.

    Args:
        csv_data_file: Path to the CSV annotations file.
        csv_class_file: Path to the CSV classes file.
        base_dir: Directory w.r.t. where the files are to be searched
            (defaults to the directory containing the csv_data_file).
        **kwargs: Forwarded to the base generator.
    """
    self.class_names = []
    self.image_names = []
    self.image_data = {}
    self.csv_data_file = csv_data_file
    self.csv_class_file = csv_class_file

    # Fall back to the annotations file's directory when no explicit
    # base_dir was supplied.
    self.base_dir = base_dir if base_dir is not None else os.path.dirname(csv_data_file)

    # Parse the class file into name -> index.
    try:
        self.class_names = read_lines(csv_class_file)
        self.classes = {name: idx for idx, name in enumerate(self.class_names)}
    except ValueError as e:
        raise_from(ValueError('invalid CSV class file: {}: {}'.format(csv_class_file, e)), None)

    # Inverse mapping: index -> name.
    self.labels = {idx: name for name, idx in self.classes.items()}

    # Parse the annotations file (img_path, x1, y1, x2, y2, class_name).
    try:
        self.image_data = self._read_annotations()
    except ValueError as e:
        raise_from(ValueError('invalid CSV annotations file: {}: {}'.format(csv_data_file, e)), None)
    self.image_names = list(self.image_data.keys())

    super(TextGenerator, self).__init__(**kwargs)
import numpy as np
import tensorflow as tf

from notekeras.model.yolo3 import Dataset, YoloDataset
from notekeras.model.yolo3 import YoloBody
from notekeras.utils import read_lines

root = '/Users/liangtaoniu/workspace/MyDiary/notechats/notekeras/example/yolo'
classes = read_lines(root + "/data/classes/coco.names")
annotation_path = root + "/data/dataset/yymnist_train.txt"

# Run tf.functions eagerly so the dataset/model code is debuggable step by step.
tf.config.experimental_run_functions_eagerly(True)


def get_anchors():
    """Return the anchor boxes as an (N, 2) float array (stride-normalized).

    Raw pixel-space anchors for reference:
    # anchor = "10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326"
    """
    anchor = '1.25,1.625, 2.0,3.75, 4.125,2.875, 1.875,3.8125, 3.875,2.8125, 3.6875,7.4375, 3.625,2.8125, 4.875,6.1875, 11.65625,10.1875'
    flat = [float(value) for value in anchor.split(',')]
    return np.array(flat).reshape(-1, 2)


anchors = get_anchors()

yolo_body = YoloBody(anchors=anchors, num_classes=len(classes))
yolo_body.debug()
yolo_body.load_weights(
    "/Users/liangtaoniu/workspace/MyDiary/tmp/models/yolo/configs/yolov3.h5",
    freeze_body=3)

train_set2 = YoloDataset(annotation_path=annotation_path,
                         anchors=anchors,
                         classes=classes,
                         batch_size=4)
import hashlib

import numpy as np

from notekeras.model.yolo3 import YoloBody
from notekeras.utils import read_lines
from notemodel.database import set_weight_path

set_weight_path("/Users/liangtaoniu/workspace/MyDiary/src/tianchi/live/data/weights")

# import tensorflow as tf
# tf.config.experimental_run_functions_eagerly(True)

classes = read_lines("coco.names")


def get_md5(weight):
    """Return the hex MD5 digest of a bytes-like weight blob."""
    digest = hashlib.md5()
    digest.update(weight)
    return digest.hexdigest()


def get_anchors():
    """Return the pixel-space anchor boxes as an (N, 2) float array.

    Stride-normalized alternative for reference:
    # anchors = '1.25,1.625, 2.0,3.75, 4.125,2.875, 1.875,3.8125, 3.875,2.8125, 3.6875,7.4375, 3.625,2.8125, 4.875,6.1875, 11.65625,10.1875'
    """
    anchors = "10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326"
    flat = [float(value) for value in anchors.split(',')]
    return np.array(flat).reshape(-1, 2)


anchors = get_anchors()

yolo_body1 = YoloBody(anchors=anchors, num_classes=len(classes))