def load_yolo(self):
    model_path = os.path.expanduser(self.model_path)
    assert model_path.endswith('.h5'), 'Keras model or weights must be a .h5 file.'

    self.class_names = self.get_class()
    self.anchors = self.get_anchors()

    num_anchors = len(self.anchors)
    num_classes = len(self.class_names)

    # Generate colors for drawing bounding boxes.
    hsv_tuples = [(x / len(self.class_names), 1., 1.)
                  for x in range(len(self.class_names))]
    self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
    self.colors = list(
        map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
            self.colors))

    self.sess = K.get_session()

    # Load model, or construct model and load weights.
    self.yolo4_model = yolo4_body(Input(shape=(416, 416, 3)), num_anchors // 3, num_classes)
    self.yolo4_model.load_weights(model_path)

    print('{} model, anchors, and classes loaded.'.format(model_path))

    if self.gpu_num >= 2:
        self.yolo4_model = multi_gpu_model(self.yolo4_model, gpus=self.gpu_num)

    self.input_image_shape = K.placeholder(shape=(2, ))
    self.boxes, self.scores, self.classes = yolo_eval(self.yolo4_model.output,
                                                      self.anchors,
                                                      len(self.class_names),
                                                      self.input_image_shape,
                                                      score_threshold=self.score)
def init():
    # if (country == 'KRW'):
    model_path = 'KRW_weight.h5'
    anchors_path = 'model_data/yolo4_anchors.txt'
    classes_path = 'model_data/KRW_classes.txt'

    class_names = get_class(classes_path)
    anchors = get_anchors(anchors_path)

    num_anchors = len(anchors)
    num_classes = len(class_names)

    model_image_size = (416, 416)

    # Score threshold and NMS IoU threshold.
    conf_thresh = 0.2
    nms_thresh = 0.45

    yolo4_model = yolo4_body(Input(shape=model_image_size + (3, )), num_anchors // 3, num_classes)

    model_path = os.path.expanduser(model_path)
    yolo4_model.load_weights(model_path)

    _decode = Decode(conf_thresh, nms_thresh, model_image_size, yolo4_model, class_names)
    # The steps above take a long time.
    # else:
    #     model_path = 'JPY_weight.h5'
    #     anchors_path = 'model_data/yolo4_anchors.txt'
    #     classes_path = 'model_data/JPY_classes.txt'
    return _decode
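# Hedged usage sketch for init(): it assumes Decode.detect_image(img, draw) returns
# (image, boxes, scores, classes), as it does in jpy_count_coin below; the input and
# output file names are only placeholders.
import cv2

_decode = init()
frame = cv2.imread('sample_krw.jpg')
image, boxes, scores, classes = _decode.detect_image(frame, True)
cv2.imwrite('krw_predict.png', image)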
def load_yolo(self):
    model_path = os.path.expanduser(self.model_path)
    assert model_path.endswith('.h5'), 'Keras model or weights must be a .h5 file.'

    self.class_names = CLASSES
    self.anchors = np.array(anchors).reshape(-1, 2)

    num_anchors = len(self.anchors)
    num_classes = len(self.class_names)

    self.sess = tf.compat.v1.Session()

    # Load model, or construct model and load weights.
    self.yolo4_model = yolo4_body(Input(shape=(self.input_size, self.input_size, 3)),
                                  num_anchors // 3, num_classes)

    # Read and convert darknet weight
    self.load_weights(self.yolo4_model, self.weights_path)

    self.yolo4_model.save(self.model_path)

    self.input_image_shape = K.placeholder(shape=(2, ))
    self.boxes, self.scores, self.classes = yolo_eval(
        self.yolo4_model.output,
        self.anchors,
        len(self.class_names),
        self.input_image_shape,
        score_threshold=self.score
    )
    print('Done.')
def jpy_count_coin(img):  # img : str
    model_path = 'JPY_weight.h5'
    anchors_path = 'model_data/yolo4_anchors.txt'
    classes_path = 'model_data/JPY_classes.txt'
    jpy_classes = ['JPY_500', 'JPY_100', 'JPY_50', 'JPY_10', 'JPY_1', 'JPY_5']

    count = {}
    result = {}
    total = 0

    class_names = get_class(classes_path)
    anchors = get_anchors(anchors_path)

    num_anchors = len(anchors)
    num_classes = len(class_names)

    model_image_size = (416, 416)

    # Score threshold and NMS IoU threshold.
    conf_thresh = 0.2
    nms_thresh = 0.8

    yolo4_model = yolo4_body(Input(shape=model_image_size + (3, )), num_anchors // 3, num_classes)

    model_path = os.path.expanduser(model_path)
    yolo4_model.load_weights(model_path)

    _decode = Decode(conf_thresh, nms_thresh, model_image_size, yolo4_model, class_names)

    try:
        encoded_img = np.fromstring(base64.b64decode(img), dtype=np.uint8)
        img = cv2.imdecode(encoded_img, cv2.IMREAD_COLOR)
    except:
        print('Open Error! Try again!')
    else:
        image, boxes, scores, classes = _decode.detect_image(img, True)
        cv2.imwrite('predict.png', image)

        with open('predict.png', 'rb') as img:
            base64_string = base64.b64encode(img.read()).decode('utf-8')

        count = collections.Counter(classes)
        for key in tuple(count.keys()):
            # Rename the dictionary keys from class indices to class names.
            count[jpy_classes[key]] = count.pop(key)
        for key, value in count.items():
            total += int(key[str(key).find('_') + 1:]) * value

        result['result'] = count
        result['total'] = total
        result['image'] = base64_string

    # yolo4_model.close_session()
    return result
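# Hedged usage sketch for jpy_count_coin(): the caller passes a base64-encoded image
# string and gets back a dict with per-class counts, the summed coin value, and the
# annotated image re-encoded as base64. 'coins.jpg' is only a placeholder path.
import base64

with open('coins.jpg', 'rb') as f:
    payload = base64.b64encode(f.read()).decode('utf-8')
response = jpy_count_coin(payload)
print(response['total'], response['result'])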
def create_model(input_shape, anchors, num_classes, load_pretrained=True, freeze_body=2,
                 weights_path='model_data/yolo_weights.h5'):
    '''create the training model'''
    K.clear_session()  # get a new session
    image_input = Input(shape=(None, None, 3))
    h, w = input_shape
    num_anchors = len(anchors)

    y_true = [Input(shape=(h // {0: 32, 1: 16, 2: 8}[l], w // {0: 32, 1: 16, 2: 8}[l],
                           num_anchors // 3, num_classes + 5)) for l in range(3)]

    model_body = yolo4_body(image_input, num_anchors // 3, num_classes)
    print('Create YOLOv4 model with {} anchors and {} classes.'.format(
        num_anchors, num_classes))

    if load_pretrained:
        model_body.load_weights(weights_path, by_name=True, skip_mismatch=True)
        print('Load weights {}.'.format(weights_path))
        if freeze_body in [1, 2]:
            # Freeze darknet53 body or freeze all but 3 output layers.
            num = (250, len(model_body.layers) - 3)[freeze_body - 1]
            for i in range(num):
                model_body.layers[i].trainable = False
            print('Freeze the first {} layers of total {} layers.'.format(
                num, len(model_body.layers)))

    label_smoothing = 0
    use_focal_obj_loss = False
    use_focal_loss = False
    use_diou_loss = True
    use_softmax_loss = False

    model_loss = Lambda(yolo4_loss,
                        output_shape=(1, ),
                        name='yolo_loss',
                        arguments={
                            'anchors': anchors,
                            'num_classes': num_classes,
                            'ignore_thresh': 0.5,
                            'label_smoothing': label_smoothing,
                            'use_focal_obj_loss': use_focal_obj_loss,
                            'use_focal_loss': use_focal_loss,
                            'use_diou_loss': use_diou_loss,
                            'use_softmax_loss': use_softmax_loss
                        })([*model_body.output, *y_true])
    model = Model([model_body.input, *y_true], model_loss)

    return model
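# Hedged training sketch for create_model(): the model's only output is the loss tensor
# produced by the yolo_loss Lambda layer, so the usual keras-yolo3-style pattern is to
# compile with an identity loss and feed dummy zero targets. The optimizer setting and
# the fit call shown in the comment are placeholders, not values taken from this file.
from keras.optimizers import Adam

model = create_model((416, 416), anchors, num_classes,
                     weights_path='model_data/yolo_weights.h5')
model.compile(optimizer=Adam(1e-3),
              loss={'yolo_loss': lambda y_true, y_pred: y_pred})
# model.fit([image_data, *y_true_data], np.zeros(batch_size), ...)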
def generate(self):
    model_path = os.path.expanduser(self.model_path)
    assert model_path.endswith('.h5'), 'Keras model or weights must be a .h5 file.'

    # Load model, or construct model and load weights.
    num_anchors = len(self.anchors)
    num_classes = len(self.class_names)
    is_tiny_version = num_anchors == 6  # default setting
    try:
        self.yolo_model = load_model(model_path, compile=False)
    except:
        self.yolo_model = yolo4_body(Input(shape=(None, None, 3)), num_anchors // 3, num_classes)
        self.yolo_model.load_weights(self.model_path)  # make sure model, anchors and classes match
    else:
        assert self.yolo_model.layers[-1].output_shape[-1] == \
            num_anchors / len(self.yolo_model.output) * (num_classes + 5), \
            'Mismatch between model and given anchor and class sizes'

    print('{} model, anchors, and classes loaded.'.format(model_path))

    # Generate colors for drawing bounding boxes.
    hsv_tuples = [(x / len(self.class_names), 1., 1.)
                  for x in range(len(self.class_names))]
    self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
    self.colors = list(
        map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
            self.colors))
    np.random.seed(10101)  # Fixed seed for consistent colors across runs.
    np.random.shuffle(self.colors)  # Shuffle colors to decorrelate adjacent classes.
    np.random.seed(None)  # Reset seed to default.

    # Generate output tensor targets for filtered bounding boxes.
    self.input_image_shape = K.placeholder(shape=(2, ))
    if self.gpu_num >= 2:
        self.yolo_model = multi_gpu_model(self.yolo_model, gpus=self.gpu_num)
    boxes, scores, classes = yolo_eval(self.yolo_model.output, self.anchors,
                                       len(self.class_names), self.input_image_shape,
                                       score_threshold=self.score, iou_threshold=self.iou)
    return boxes, scores, classes
def __init__(self, conf_thresh: float = 0.8, nms_thresh: float = 0.8):
    class_names = get_class(self.classes_path)
    anchors = get_anchors(self.anchors_path)
    model_image_size = (416, 416)

    self._model: keras.Model = yolo4_body(
        inputs=keras.Input(shape=model_image_size + (3, )),
        num_anchors=len(anchors) // 3,
        num_classes=len(class_names),
    )
    self._model.load_weights(os.path.expanduser(self.model_path))

    self._decoder: Decode = Decode(
        obj_threshold=conf_thresh,
        nms_threshold=nms_thresh,
        input_shape=model_image_size,
        _yolo=self._model,
        all_classes=class_names,
    )
def create_model(input_shape, anchors_stride_base, num_classes, load_pretrained=True, freeze_body=2,
                 weights_path='model_data/yolo_weights.h5'):
    '''create the training model'''
    K.clear_session()  # get a new session
    image_input = Input(shape=(None, None, 3))
    h, w = input_shape
    num_anchors = len(anchors_stride_base)
    max_bbox_per_scale = 150
    iou_loss_thresh = 0.7

    model_body = yolo4_body(image_input, num_anchors, num_classes)
    print('Create YOLOv4 model with {} anchors and {} classes.'.format(num_anchors * 3, num_classes))

    if load_pretrained:
        model_body.load_weights(weights_path, by_name=True, skip_mismatch=True)
        print('Load weights {}.'.format(weights_path))
        if freeze_body in [1, 2]:
            # Freeze darknet53 body or freeze all but 3 output layers.
            num = (250, len(model_body.layers) - 3)[freeze_body - 1]
            for i in range(num):
                model_body.layers[i].trainable = False
            print('Freeze the first {} layers of total {} layers.'.format(num, len(model_body.layers)))

    y_true = [
        layers.Input(name='input_2', shape=(None, None, 3, (num_classes + 5))),  # label_sbbox
        layers.Input(name='input_3', shape=(None, None, 3, (num_classes + 5))),  # label_mbbox
        layers.Input(name='input_4', shape=(None, None, 3, (num_classes + 5))),  # label_lbbox
        layers.Input(name='input_5', shape=(max_bbox_per_scale, 4)),             # true_sbboxes
        layers.Input(name='input_6', shape=(max_bbox_per_scale, 4)),             # true_mbboxes
        layers.Input(name='input_7', shape=(max_bbox_per_scale, 4))              # true_lbboxes
    ]
    loss_list = layers.Lambda(yolo_loss, name='yolo_loss',
                              arguments={'num_classes': num_classes,
                                         'iou_loss_thresh': iou_loss_thresh,
                                         'anchors': anchors_stride_base})([*model_body.output, *y_true])
    model = Model([model_body.input, *y_true], loss_list)
    # model.summary()

    return model, model_body
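# Hedged note on the (model, model_body) pair returned above: the wrapper model is the
# one compiled and fitted (its output already is the loss), while model_body keeps the
# plain detection graph so its trained weights can be exported for inference. The
# output file name below is a placeholder.
model, model_body = create_model((416, 416), anchors_stride_base, num_classes)
model.compile(optimizer='adam', loss=lambda y_true, y_pred: y_pred)
# ... model.fit(...) on batches of [image, label_sbbox, label_mbbox, label_lbbox, true_*bboxes] ...
model_body.save_weights('trained_weights_final.h5')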
def load_yolo(self):
    model_path = os.path.expanduser(self.model_path)
    assert model_path.endswith('.h5'), 'Keras model or weights must be a .h5 file.'

    self.class_names = self.get_class()
    self.anchors = self.get_anchors()

    num_anchors = len(self.anchors)
    num_classes = len(self.class_names)

    # Generate colors for drawing bounding boxes.
    hsv_tuples = [(x / len(self.class_names), 1., 1.)
                  for x in range(len(self.class_names))]
    self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
    self.colors = list(
        map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
            self.colors))

    self.sess = K.get_session()

    # Load model, or construct model and load weights.
    self.yolo4_model = yolo4_body(Input(shape=(608, 608, 3)), num_anchors // 3, num_classes)

    # Read and convert darknet weight
    print('Loading weights.')
    weights_file = open(self.weights_path, 'rb')
    major, minor, revision = np.ndarray(shape=(3, ),
                                        dtype='int32',
                                        buffer=weights_file.read(12))
    if (major * 10 + minor) >= 2 and major < 1000 and minor < 1000:
        seen = np.ndarray(shape=(1, ), dtype='int64', buffer=weights_file.read(8))
    else:
        seen = np.ndarray(shape=(1, ), dtype='int32', buffer=weights_file.read(4))
    print('Weights Header: ', major, minor, revision, seen)

    convs_to_load = []
    bns_to_load = []
    for i in range(len(self.yolo4_model.layers)):
        layer_name = self.yolo4_model.layers[i].name
        if layer_name.startswith('conv2d_'):
            convs_to_load.append((int(layer_name[7:]), i))
        if layer_name.startswith('batch_normalization_'):
            bns_to_load.append((int(layer_name[20:]), i))

    convs_sorted = sorted(convs_to_load, key=itemgetter(0))
    bns_sorted = sorted(bns_to_load, key=itemgetter(0))

    bn_index = 0
    for i in range(len(convs_sorted)):
        print('Converting ', i)
        if i == 93 or i == 101 or i == 109:
            # no bn, with bias
            weights_shape = self.yolo4_model.layers[convs_sorted[i][1]].get_weights()[0].shape
            bias_shape = self.yolo4_model.layers[convs_sorted[i][1]].get_weights()[0].shape[3]
            filters = bias_shape
            size = weights_shape[0]
            darknet_w_shape = (filters, weights_shape[2], size, size)
            weights_size = np.product(weights_shape)

            conv_bias = np.ndarray(shape=(filters, ),
                                   dtype='float32',
                                   buffer=weights_file.read(filters * 4))
            conv_weights = np.ndarray(shape=darknet_w_shape,
                                      dtype='float32',
                                      buffer=weights_file.read(weights_size * 4))
            conv_weights = np.transpose(conv_weights, [2, 3, 1, 0])
            self.yolo4_model.layers[convs_sorted[i][1]].set_weights([conv_weights, conv_bias])
        else:
            # with bn, no bias
            weights_shape = self.yolo4_model.layers[convs_sorted[i][1]].get_weights()[0].shape
            size = weights_shape[0]
            bn_shape = self.yolo4_model.layers[bns_sorted[bn_index][1]].get_weights()[0].shape
            filters = bn_shape[0]
            darknet_w_shape = (filters, weights_shape[2], size, size)
            weights_size = np.product(weights_shape)

            conv_bias = np.ndarray(shape=(filters, ),
                                   dtype='float32',
                                   buffer=weights_file.read(filters * 4))
            bn_weights = np.ndarray(shape=(3, filters),
                                    dtype='float32',
                                    buffer=weights_file.read(filters * 12))
            bn_weight_list = [
                bn_weights[0],  # scale gamma
                conv_bias,      # shift beta
                bn_weights[1],  # running mean
                bn_weights[2]   # running var
            ]
            self.yolo4_model.layers[bns_sorted[bn_index][1]].set_weights(bn_weight_list)

            conv_weights = np.ndarray(shape=darknet_w_shape,
                                      dtype='float32',
                                      buffer=weights_file.read(weights_size * 4))
            conv_weights = np.transpose(conv_weights, [2, 3, 1, 0])
            self.yolo4_model.layers[convs_sorted[i][1]].set_weights([conv_weights])
            bn_index += 1

    weights_file.close()

    self.yolo4_model.save(self.model_path)

    if self.gpu_num >= 2:
        self.yolo4_model = multi_gpu_model(self.yolo4_model, gpus=self.gpu_num)

    self.input_image_shape = K.placeholder(shape=(2, ))
    self.boxes, self.scores, self.classes = yolo_eval(
        self.yolo4_model.output, self.anchors, len(self.class_names),
        self.input_image_shape, score_threshold=self.score)
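# Hedged inference sketch for the load_yolo() variants above: a detect-style method
# would feed a preprocessed image batch plus the original image shape into the tensors
# built by yolo_eval. The resize/scale preprocessing follows the usual keras-yolo3
# pattern and is an assumption here, not code from this file.
def detect_image_sketch(self, image):
    resized = cv2.resize(image, (608, 608))  # match the Input shape used above
    image_data = np.expand_dims(resized.astype('float32') / 255., 0)
    out_boxes, out_scores, out_classes = self.sess.run(
        [self.boxes, self.scores, self.classes],
        feed_dict={
            self.yolo4_model.input: image_data,
            self.input_image_shape: [image.shape[0], image.shape[1]],
            K.learning_phase(): 0
        })
    return out_boxes, out_scores, out_classes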
anchors_path = 'model_data/yolo4_anchors.txt'
# classes_path = 'model_data/voc_classes.txt'
classes_path = 'model_data/coco_classes.txt'

class_names = get_class(classes_path)
anchors = get_anchors(anchors_path)

num_anchors = len(anchors)
num_classes = len(class_names)

model_image_size = (608, 608)

conf_thresh = 0.2
nms_thresh = 0.45

yolo4_model = yolo4_body(Input(shape=model_image_size + (3, )), num_anchors // 3, num_classes)

model_path = os.path.expanduser(model_path)
assert model_path.endswith('.h5'), 'Keras model or weights must be a .h5 file.'
yolo4_model.load_weights(model_path)

_decode = Decode(conf_thresh, nms_thresh, model_image_size, yolo4_model, class_names)

while True:
    img = input('Input image filename:')
    try:
        image = cv2.imread(img)
    except: