def test_layer_output_batched_and_context(self):
    input_dim = 1
    context = 2
    batch = 3
    odim = 4
    sess = K.get_session()
    layer = FuzzyLayer(odim)
    layer.build(input_shape=(batch, context, input_dim))
    x = K.placeholder(shape=(batch, context, input_dim))
    c = K.placeholder(shape=(input_dim, odim))
    a = K.placeholder(shape=(input_dim, odim))
    layer.c = c
    layer.a = a
    xc = layer.call(x)

    xx = [[[0.5], [0.8]], [[0.8], [0.6]], [[0.6], [0.4]]]
    cc = [[1, 0.8, 0.6, 0.4]]
    aa = [[1, 1, 1, 1]]
    vals = sess.run(xc, feed_dict={x: xx, c: cc, a: aa})

    self.assertEqual(len(vals), batch)
    self.assertEqual(len(vals[0]), context)
    self.assertEqual(len(vals[0][0]), odim)
def test_layer_output_batched(self):
    odim = 2
    input_dim = 2
    batch = 3
    sess = K.get_session()
    layer = FuzzyLayer(odim)
    layer.build(input_shape=(batch, input_dim))
    x = K.placeholder(shape=(batch, input_dim))
    c = K.placeholder(shape=(input_dim, odim))
    a = K.placeholder(shape=(input_dim, odim))
    layer.c = c
    layer.a = a
    xc = layer.call(x)

    xx = [[1, 1], [0, 0], [0.5, 0.5]]
    cc = [[1, 0], [1, 0]]
    aa = [[1 / 10, 1 / 10], [1 / 10, 1 / 10]]
    vals = sess.run(xc, feed_dict={x: xx, c: cc, a: aa})

    self.assertEqual(len(vals), batch)
    self.assertEqual(len(vals[0]), odim)
    self.assertAlmostEqual(vals[0][0], 1, 7)
    self.assertAlmostEqual(vals[0][1], 0, 7)
    self.assertAlmostEqual(vals[1][0], 0, 7)
    self.assertAlmostEqual(vals[1][1], 1, 7)
    self.assertAlmostEqual(vals[2][0], 0.000003726653172, 7)
    self.assertAlmostEqual(vals[2][1], 0.000003726653172, 7)
def test_layer_output_batched2(self):
    odim = 2
    input_dim = 2
    batch = 2
    sess = K.get_session()
    layer = FuzzyLayer(odim)
    layer.build(input_shape=(batch, input_dim))
    x = K.placeholder(shape=(batch, input_dim))
    c = K.placeholder(shape=(input_dim, odim))
    a = K.placeholder(shape=(input_dim, odim))
    layer.c = c
    layer.a = a
    xc = layer.call(x)

    xx = [[0.5, 0.8], [0.8, 0.5]]
    cc = [[1, 0.2], [0.8, 0]]
    aa = [[1 / 2, 1 / 4], [1, 1 / 8]]
    vals = sess.run(xc, feed_dict={x: xx, c: cc, a: aa})

    self.assertEqual(len(vals), batch)
    self.assertEqual(len(vals[0]), odim)
    self.assertAlmostEqual(vals[0][0], 0.7788007831, 7)
    self.assertAlmostEqual(vals[0][1], 0.00002491600973, 7)
    self.assertAlmostEqual(vals[1][0], 0.9394130628, 7)
    self.assertAlmostEqual(vals[1][1], 0.004339483271, 7)
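# The expected values in the FuzzyLayer tests above are consistent with a Gaussian
# membership function of the form exp(-sum_i ((x_i - c_ij) / (2 * a_ij)) ** 2).
# The snippet below is only a minimal NumPy sketch used to reproduce the reference
# numbers from test_layer_output_batched2; the actual FuzzyLayer implementation may
# differ in detail.
import numpy as np

def gaussian_membership(x, c, a):
    # x: (input_dim,); c, a: (input_dim, odim) -> returns (odim,) membership values.
    return np.exp(-np.sum(((x[:, None] - c) / (2 * a)) ** 2, axis=0))

x_check = np.array([0.5, 0.8])
c_check = np.array([[1, 0.2], [0.8, 0]])
a_check = np.array([[1 / 2, 1 / 4], [1, 1 / 8]])
print(gaussian_membership(x_check, c_check, a_check))
# ~[7.788e-01, 2.492e-05], matching the first batch row asserted above.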
def test_defuzzy3(self):
    odim = 3
    input_dim = 2
    batch = 2
    sess = K.get_session()
    layer = DefuzzyLayer(odim)
    layer.build(input_shape=(batch, input_dim))
    x = K.placeholder(shape=(batch, input_dim))
    rules_outcome = K.placeholder(shape=(input_dim, odim))
    layer.rules_outcome = rules_outcome
    xc = layer.call(x)

    xx = [[0.2, 0.3], [0.3, 0.2]]
    cc = [[1, 2, 3], [0, 1, 0]]
    vals = sess.run(xc, feed_dict={x: xx, rules_outcome: cc})

    self.assertEqual(len(vals), batch)
    self.assertEqual(len(vals[0]), odim)
    self.assertAlmostEqual(vals[0][0], 0.2, 7)
    self.assertAlmostEqual(vals[0][1], 0.7, 7)
    self.assertAlmostEqual(vals[0][2], 0.6, 7)
    self.assertAlmostEqual(vals[1][0], 0.3, 7)
    self.assertAlmostEqual(vals[1][1], 0.8, 7)
    self.assertAlmostEqual(vals[1][2], 0.9, 7)
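# The defuzzification values asserted above correspond to a plain matrix product of
# the inputs with the rule outcomes, output = x @ rules_outcome. A minimal NumPy
# check of that interpretation (not the DefuzzyLayer implementation itself):
import numpy as np

x_check = np.array([[0.2, 0.3], [0.3, 0.2]])
rules_outcome_check = np.array([[1, 2, 3], [0, 1, 0]])
print(x_check @ rules_outcome_check)
# [[0.2, 0.7, 0.6], [0.3, 0.8, 0.9]], matching the assertions above.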
def _generate(self, model_path):
    model_path = os.path.expanduser(model_path)
    assert model_path.endswith(".h5"), "Keras model or weights must be a .h5 file"

    # load model, or construct model and load weights
    num_anchors = len(self.anchors)
    num_classes = len(self.class_names)
    try:
        self.yolo_model = tf.compat.v1.keras.models.load_model(model_path, compile=False)
    except:
        # make sure model, anchors and classes match
        self.yolo_model.load_weights(model_path)
    else:
        assert self.yolo_model.layers[-1].output_shape[-1] == num_anchors / len(
            self.yolo_model.output) * (num_classes + 5), \
            "Mismatch between model and given anchor and class sizes"

    # generate output tensor targets for filtered bounding boxes.
    self.input_image_shape = K.placeholder(shape=(2, ))
    boxes, scores, classes = self._eval(
        self.yolo_model.output,
        self.anchors,
        len(self.class_names),
        self.input_image_shape,
        score_threshold=self.score_threshold,
        iou_threshold=self.iou_threshold,
    )
    return boxes, scores, classes
def do_activation(input_values, function_name, alpha=0.2):
    """Runs input array through activation function.

    :param input_values: numpy array (any shape).
    :param function_name: Name of activation function (must be accepted by
        `architecture_utils.check_activation_function`).
    :param alpha: Slope parameter (alpha) for activation function.  This applies
        only for eLU and ReLU.
    :return: output_values: Same as `input_values` but post-activation.
    """

    architecture_utils.check_activation_function(
        activation_function_string=function_name,
        alpha_for_elu=alpha,
        alpha_for_relu=alpha
    )

    input_object = K.placeholder()

    if function_name == architecture_utils.ELU_FUNCTION_STRING:
        function_object = K.function(
            [input_object],
            [layers.ELU(alpha=alpha)(input_object)]
        )
    elif function_name == architecture_utils.RELU_FUNCTION_STRING:
        function_object = K.function(
            [input_object],
            [layers.LeakyReLU(alpha=alpha)(input_object)]
        )
    else:
        function_object = K.function(
            [input_object],
            [layers.Activation(function_name)(input_object)]
        )

    return function_object([input_values])[0]
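# A sketch of how do_activation might be called. Only ELU_FUNCTION_STRING and
# RELU_FUNCTION_STRING are visible above; any other accepted name is passed straight
# to keras.layers.Activation. Assumes numpy is imported under its full name.
import numpy

example_values = numpy.array([-2.0, -0.5, 0.0, 0.5, 2.0])
leaky_outputs = do_activation(
    input_values=example_values,
    function_name=architecture_utils.RELU_FUNCTION_STRING,  # mapped to LeakyReLU above
    alpha=0.2
)
print(leaky_outputs)  # negative inputs scaled by alpha, positive inputs unchanged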
def generate(self):
    model_path = os.path.expanduser(self.model_path)
    assert model_path.endswith('.h5'), 'Keras model or weights must be a .h5 file.'

    self.yolo_model = load_model(model_path, custom_objects={'Mish': Mish}, compile=False)
    print('{} model, anchors, and classes loaded.'.format(model_path))

    # Generate colors for drawing bounding boxes.
    hsv_tuples = [(x / len(self.class_names), 1., 1.)
                  for x in range(len(self.class_names))]
    self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
    self.colors = list(
        map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
            self.colors))
    np.random.seed(10101)  # Fixed seed for consistent colors across runs.
    np.random.shuffle(self.colors)  # Shuffle colors to decorrelate adjacent classes.
    np.random.seed(None)  # Reset seed to default.

    # Generate output tensor targets for filtered bounding boxes.
    self.input_image_shape = K.placeholder(shape=(2, ))
    if self.gpu_num >= 2:
        self.yolo_model = multi_gpu_model(self.yolo_model, gpus=self.gpu_num)
    boxes, scores, classes = yolo_eval(self.yolo_model.output, self.anchors,
                                       len(self.class_names), self.input_image_shape,
                                       score_threshold=self.score, iou_threshold=self.iou)
    return boxes, scores, classes
def _generate(self):
    print("_generate")
    model_path = os.path.expanduser(self.model_path)
    assert model_path.endswith('.h5'), 'Keras model or weights must be a .h5 file.'

    self.yolo_model = load_model(model_path, custom_objects={'Mish': Mish}, compile=False)
    print('{} model, anchors, and classes loaded.'.format(model_path))

    # Generate colors for drawing bounding boxes.
    colors = self._random_colors(len(CLASSES))

    # Generate output tensor targets for filtered bounding boxes.
    self.input_image_shape = K.placeholder(shape=(2, ))
    if self.gpu_num >= 2:
        self.yolo_model = multi_gpu_model(self.yolo_model, gpus=self.gpu_num)
    boxes, scores, classes = yolo_eval(self.yolo_model.output, self.anchors,
                                       len(self.class_names), self.input_image_shape,
                                       score_threshold=self.score, iou_threshold=self.iou)
    return boxes, scores, classes, colors
def generate(self):
    model_path = os.path.expanduser(self.model_path)
    assert model_path.endswith('.h5'), 'Keras model or weights must be a .h5 file.'

    # Count the anchors and classes.
    num_anchors = len(self.anchors)
    num_classes = len(self.class_names)

    # Load the model: if the saved file already contained the model structure it
    # could be loaded directly; here the model is built first and the weights are
    # then loaded into it.
    self.yolo_model = yolo_body(Input(shape=(None, None, 3)), num_anchors // 3, num_classes)
    self.yolo_model.load_weights(self.model_path)
    print('{} model, anchors, and classes loaded.'.format(model_path))

    # Assign a different color to each class for drawing boxes.
    hsv_tuples = [(x / len(self.class_names), 1., 1.)
                  for x in range(len(self.class_names))]
    self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
    self.colors = list(
        map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
            self.colors))

    # Shuffle the colors.
    np.random.seed(10101)
    np.random.shuffle(self.colors)
    np.random.seed(None)

    if self.eager:
        self.input_image_shape = Input([2, ], batch_size=1)
        inputs = [*self.yolo_model.output, self.input_image_shape]
        outputs = Lambda(yolo_eval,
                         output_shape=(1, ),
                         name='yolo_eval',
                         arguments={
                             'anchors': self.anchors,
                             'num_classes': len(self.class_names),
                             'image_shape': self.model_image_size,
                             'score_threshold': self.score,
                             'eager': True,
                             'max_boxes': self.max_boxes
                         })(inputs)
        self.yolo_model = Model([self.yolo_model.input, self.input_image_shape], outputs)
    else:
        self.input_image_shape = K.placeholder(shape=(2, ))
        self.boxes, self.scores, self.classes = yolo_eval(
            self.yolo_model.output, self.anchors, num_classes,
            self.input_image_shape, max_boxes=self.max_boxes,
            score_threshold=self.score, iou_threshold=self.iou)
def generate(self):
    model_path = os.path.expanduser(self.model_path)
    assert model_path.endswith('.h5'), 'Keras model or weights must be a .h5 file.'

    # Load model, or construct model and load weights.
    num_anchors = len(self.anchors)
    num_classes = len(self.class_names)
    is_tiny_version = num_anchors == 6  # default setting
    try:
        self.yolo_model = load_model(model_path, compile=False)
    except:
        self.yolo_model = tiny_yolo_body(Input(shape=(None, None, 3)), num_anchors // 2, num_classes) \
            if is_tiny_version else yolo_body(Input(shape=(None, None, 3)), num_anchors // 3, num_classes)
        self.yolo_model.load_weights(self.model_path)  # make sure model, anchors and classes match
    else:
        assert self.yolo_model.layers[-1].output_shape[-1] == \
            num_anchors / len(self.yolo_model.output) * (num_classes + 5), \
            'Mismatch between model and given anchor and class sizes'
    print('{} model, anchors, and classes loaded.'.format(model_path))

    # Generate colors for drawing bounding boxes.
    hsv_tuples = [(x / len(self.class_names), 1., 1.)
                  for x in range(len(self.class_names))]
    self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
    self.colors = list(
        map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
            self.colors))
    np.random.seed(10101)  # Fixed seed for consistent colors across runs.
    np.random.shuffle(self.colors)  # Shuffle colors to decorrelate adjacent classes.
    np.random.seed(None)  # Reset seed to default.

    # Generate output tensor targets for filtered bounding boxes.
    self.input_image_shape = K.placeholder(shape=(2, ))
    if self.gpu_num >= 2:
        # https://tensorflow.google.cn/api_docs/python/tf/distribute/MirroredStrategy
        my_strategy = tensorflow.distribute.MirroredStrategy()
        with my_strategy.scope():
            self.yolo_model = multi_gpu_model(self.yolo_model, gpus=self.gpu_num)
    boxes, scores, classes = yolo_eval(self.yolo_model.output, self.anchors,
                                       len(self.class_names), self.input_image_shape,
                                       score_threshold=self.score, iou_threshold=self.iou)
    return boxes, scores, classes
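# The shape assertion in the try/else branch above encodes the usual YOLOv3 head
# layout: each output scale predicts num_anchors / num_output_scales anchors, and
# each anchor carries 4 box coordinates + 1 objectness score + num_classes class
# scores. A quick worked check with a common COCO-style configuration (assumed
# example numbers, not taken from this code):
num_anchors = 9          # 3 anchors at each of 3 output scales
num_output_layers = 3    # len(self.yolo_model.output)
num_classes = 80         # COCO
channels_per_scale = num_anchors / num_output_layers * (num_classes + 5)
print(channels_per_scale)  # 255.0 -> last dimension of each output tensor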
def test_layer_output_single2(self):
    odim = 2
    input_dim = 2
    batch = 1
    sess = K.get_session()
    layer = FuzzyLayer(odim)
    layer.build(input_shape=(batch, input_dim))
    x = K.placeholder(shape=(batch, input_dim))
    c = K.placeholder(shape=(input_dim, odim))
    a = K.placeholder(shape=(input_dim, odim))
    layer.c = c
    layer.a = a
    xc = layer.call(x)

    xx = [[0.5, 0.5]]
    cc = [[1, 0], [1, 0]]
    aa = [[1 / 2, 1 / 2], [1 / 2, 1 / 2]]
    vals = sess.run(xc, feed_dict={x: xx, c: cc, a: aa})

    self.assertEqual(len(vals), batch)
    self.assertEqual(len(vals[0]), odim)
    self.assertAlmostEqual(vals[0][0], 0.6065306597, 7)
    self.assertAlmostEqual(vals[0][1], 0.6065306597, 7)
def generate(self):
    model_path = os.path.expanduser(self.model_path)
    assert model_path.endswith('.h5'), 'Keras model or weights must be a .h5 file.'

    #---------------------------------------------------#
    #   Count the anchor boxes and the classes.
    #---------------------------------------------------#
    num_anchors = len(self.anchors)
    num_classes = len(self.class_names)

    #---------------------------------------------------------#
    #   Load the model.
    #---------------------------------------------------------#
    self.yolo_model = yolo_body(Input(shape=(None, None, 3)), num_anchors // 3,
                                num_classes, self.backbone, self.alpha)
    self.yolo_model.load_weights(self.model_path)
    print('{} model, anchors, and classes loaded.'.format(model_path))

    # Assign a different color to each class for drawing boxes.
    hsv_tuples = [(x / len(self.class_names), 1., 1.)
                  for x in range(len(self.class_names))]
    self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
    self.colors = list(
        map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
            self.colors))

    # Shuffle the colors.
    np.random.seed(10101)
    np.random.shuffle(self.colors)
    np.random.seed(None)

    #---------------------------------------------------------#
    #   yolo_eval post-processes the raw predictions:
    #   decoding, non-maximum suppression, score thresholding, etc.
    #---------------------------------------------------------#
    if self.eager:
        self.input_image_shape = Input([2, ], batch_size=1)
        inputs = [*self.yolo_model.output, self.input_image_shape]
        outputs = Lambda(yolo_eval,
                         output_shape=(1, ),
                         name='yolo_eval',
                         arguments={'anchors': self.anchors,
                                    'num_classes': len(self.class_names),
                                    'image_shape': self.model_image_size,
                                    'score_threshold': self.score,
                                    'eager': True,
                                    'max_boxes': self.max_boxes,
                                    'letterbox_image': self.letterbox_image})(inputs)
        self.yolo_model = Model([self.yolo_model.input, self.input_image_shape], outputs)
    else:
        self.input_image_shape = K.placeholder(shape=(2, ))
        self.boxes, self.scores, self.classes = yolo_eval(
            self.yolo_model.output, self.anchors, num_classes, self.input_image_shape,
            max_boxes=self.max_boxes, score_threshold=self.score,
            iou_threshold=self.iou, letterbox_image=self.letterbox_image)
def load_yolo(self):
    model_path = os.path.expanduser(self.model_path)
    assert model_path.endswith('.h5'), 'Keras model or weights must be a .h5 file.'

    self.class_names = self.get_class()
    self.anchors = self.get_anchors()
    num_anchors = len(self.anchors)
    num_classes = len(self.class_names)

    # Generate colors for drawing bounding boxes.
    hsv_tuples = [(x / len(self.class_names), 1., 1.)
                  for x in range(len(self.class_names))]
    self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
    self.colors = list(
        map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
            self.colors))

    self.sess = K.get_session()

    # Load model, or construct model and load weights.
    self.yolo4_model = yolo4_body(Input(shape=(608, 608, 3)), num_anchors // 3, num_classes)
    self.yolo4_model.load_weights(model_path)
    print('{} model, anchors, and classes loaded.'.format(model_path))

    if self.gpu_num >= 2:
        self.yolo4_model = multi_gpu_model(self.yolo4_model, gpus=self.gpu_num)

    self.input_image_shape = K.placeholder(shape=(2, ))
    self.boxes, self.scores, self.classes = yolo_eval(
        self.yolo4_model.output, self.anchors, len(self.class_names),
        self.input_image_shape, score_threshold=self.score)
def generate(self):
    model_path = os.path.expanduser(self.model_path)
    assert model_path.endswith('.h5'), 'Keras model or weights must be a .h5 file.'

    # Load model, or construct model and load weights.
    num_anchors = len(self.anchors)
    num_classes = len(self.class_names)
    try:
        self.yolo_model = load_model(model_path, compile=False)
    except:
        self.yolo_model = yolo_body(Input(shape=(None, None, 3)), anchors_per_level, num_classes)
        self.yolo_model.load_weights(self.model_path)  # make sure model, anchors and classes match
    else:
        # new output: the last dimension also includes NUM_ANGLES3 values per anchor
        assert self.yolo_model.layers[-1].output_shape[-1] == \
            num_anchors / len(self.yolo_model.output) * (num_classes + 5 + NUM_ANGLES3), \
            'Mismatch between model and given anchor and class sizes'
    print('{} model, anchors, and classes loaded.'.format(model_path))

    # Generate output tensor targets for filtered bounding boxes.
    self.input_image_shape = K.placeholder(shape=(2, ))
    if self.gpu_num >= 2:
        self.yolo_model = multi_gpu_model(self.yolo_model, gpus=self.gpu_num)
    boxes, scores, classes, polygons = yolo_eval(
        self.yolo_model.output, self.anchors, len(self.class_names),
        self.input_image_shape, score_threshold=self.score, iou_threshold=self.iou)
    return boxes, scores, classes, polygons
def load_yolo(self):
    model_path = os.path.expanduser(self.model_path)
    assert model_path.endswith('.h5'), 'Keras model or weights must be a .h5 file.'

    self.class_names = self.get_class()
    self.anchors = self.get_anchors()
    num_anchors = len(self.anchors)
    num_classes = len(self.class_names)

    # Generate colors for drawing bounding boxes.
    hsv_tuples = [(x / len(self.class_names), 1., 1.)
                  for x in range(len(self.class_names))]
    self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
    self.colors = list(
        map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
            self.colors))

    self.sess = tf.compat.v1.Session()

    # Load model, or construct model and load weights.
    self.yolo4_model = yolo4_body(Input(shape=(416, 416, 3)), num_anchors // 3, num_classes)

    # Read and convert darknet weight
    print('Loading weights.')
    weights_file = open(self.weights_path, 'rb')
    major, minor, revision = np.ndarray(
        shape=(3, ), dtype='int32', buffer=weights_file.read(12))
    if (major * 10 + minor) >= 2 and major < 1000 and minor < 1000:
        seen = np.ndarray(shape=(1, ), dtype='int64', buffer=weights_file.read(8))
    else:
        seen = np.ndarray(shape=(1, ), dtype='int32', buffer=weights_file.read(4))
    print('Weights Header: ', major, minor, revision, seen)

    convs_to_load = []
    bns_to_load = []
    for i in range(len(self.yolo4_model.layers)):
        layer_name = self.yolo4_model.layers[i].name
        if layer_name.startswith('conv2d_'):
            convs_to_load.append((int(layer_name[7:]), i))
        if layer_name.startswith('batch_normalization_'):
            bns_to_load.append((int(layer_name[20:]), i))

    convs_sorted = sorted(convs_to_load, key=itemgetter(0))
    bns_sorted = sorted(bns_to_load, key=itemgetter(0))

    bn_index = 0
    for i in range(len(convs_sorted)):
        print('Converting ', i)
        if i == 93 or i == 101 or i == 109:
            # no bn, with bias
            weights_shape = self.yolo4_model.layers[convs_sorted[i][1]].get_weights()[0].shape
            bias_shape = self.yolo4_model.layers[convs_sorted[i][1]].get_weights()[0].shape[3]
            filters = bias_shape
            size = weights_shape[0]
            darknet_w_shape = (filters, weights_shape[2], size, size)
            weights_size = np.product(weights_shape)

            conv_bias = np.ndarray(
                shape=(filters, ),
                dtype='float32',
                buffer=weights_file.read(filters * 4))
            conv_weights = np.ndarray(
                shape=darknet_w_shape,
                dtype='float32',
                buffer=weights_file.read(weights_size * 4))
            conv_weights = np.transpose(conv_weights, [2, 3, 1, 0])
            self.yolo4_model.layers[convs_sorted[i][1]].set_weights([conv_weights, conv_bias])
        else:
            # with bn, no bias
            weights_shape = self.yolo4_model.layers[convs_sorted[i][1]].get_weights()[0].shape
            size = weights_shape[0]
            bn_shape = self.yolo4_model.layers[bns_sorted[bn_index][1]].get_weights()[0].shape
            filters = bn_shape[0]
            darknet_w_shape = (filters, weights_shape[2], size, size)
            weights_size = np.product(weights_shape)

            conv_bias = np.ndarray(
                shape=(filters, ),
                dtype='float32',
                buffer=weights_file.read(filters * 4))
            bn_weights = np.ndarray(
                shape=(3, filters),
                dtype='float32',
                buffer=weights_file.read(filters * 12))
            bn_weight_list = [
                bn_weights[0],  # scale gamma
                conv_bias,      # shift beta
                bn_weights[1],  # running mean
                bn_weights[2]   # running var
            ]
            self.yolo4_model.layers[bns_sorted[bn_index][1]].set_weights(bn_weight_list)

            conv_weights = np.ndarray(
                shape=darknet_w_shape,
                dtype='float32',
                buffer=weights_file.read(weights_size * 4))
            conv_weights = np.transpose(conv_weights, [2, 3, 1, 0])
            self.yolo4_model.layers[convs_sorted[i][1]].set_weights([conv_weights])
            bn_index += 1

    weights_file.close()
    self.yolo4_model.save(self.model_path)

    if self.gpu_num >= 2:
        self.yolo4_model = multi_gpu_model(self.yolo4_model, gpus=self.gpu_num)

    self.input_image_shape = K.placeholder(shape=(2, ))
    self.boxes, self.scores, self.classes = yolo_eval(
        self.yolo4_model.output, self.anchors, len(self.class_names),
        self.input_image_shape, score_threshold=self.score)
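# The np.transpose(conv_weights, [2, 3, 1, 0]) calls above map Darknet's
# (out_channels, in_channels, height, width) convolution weight layout onto Keras'
# (height, width, in_channels, out_channels) Conv2D kernel layout. A shape-only
# illustration with assumed example dimensions (not read from a real weights file):
import numpy as np

darknet_weights = np.zeros((32, 3, 3, 3), dtype='float32')  # (out, in, height, width)
keras_weights = np.transpose(darknet_weights, [2, 3, 1, 0])
print(keras_weights.shape)  # (3, 3, 3, 32) -> (height, width, in, out)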
def calculate_losses_from_generator(tg, model, num_steps=None, stepsize=1, verbose=0):
    """
    Keras evaluate_generator only returns a scalar loss (mean) while
    predict_generator only returns the predictions but not the real labels.

    TODO: Make it batch size independent

    Parameters
    ----------
    tg : object
        Data generator
    model : object
        Keras model
    num_steps : int, optional
        How many steps should be evaluated, by default None
        (runs through full experiment)
    stepsize : int, optional
        Determines how many samples will be evaluated. 1 -> N samples evaluated,
        2 -> N/2 samples evaluated, etc..., by default 1
    verbose : int, optional
        Verbosity level

    Returns
    -------
    losses : (N,1) array_like
        Loss between predicted and ground truth observation
    predictions : dict
        Dictionary with predictions for each behaviour, each item in dict has
        size (N, Z) with Z the dimensions of the sample
        (e.g. Z_position=2, Z_speed=1, ...)
    indices : (N,1) array_like
        Indices which were evaluated, important when taking stepsize unequal to 1
    """
    # X.) Parse inputs
    if num_steps is None:
        num_steps = len(tg)

    # 1.) Make a copy and adjust attributes
    tmp_dict = tg.__dict__.copy()
    if tg.batch_size != 1:
        tg.batch_size = 1
        tg.random_batches = False
        tg.shuffle = False
        tg.sample_size = tg.model_timesteps * tg.batch_size

    # 2.) Get output tensors
    sess = K.get_session()
    (_, test_out) = tg.__getitem__(0)
    real_tensor, calc_tensors = K.placeholder(), []
    for output_index in range(0, len(test_out)):
        prediction_tensor = model.outputs[output_index]
        loss_tensor = model.loss_functions[output_index].fn(real_tensor, prediction_tensor)
        calc_tensors.append((prediction_tensor, loss_tensor))

    # 3.) Predict
    losses, predictions, indices = [], [], []
    for i in range(0, num_steps, stepsize):
        (in_tg, out_tg) = tg.__getitem__(i)
        indices.append(tg.cv_indices[i])
        loss, prediction = [], []
        for o in range(0, len(out_tg)):
            evaluated = sess.run(calc_tensors[o],
                                 feed_dict={model.input: in_tg, real_tensor: out_tg[o]})
            prediction.append(evaluated[0][0, ...])
            loss.append(evaluated[1][0, ...])  # Get rid of batch dimensions
        predictions.append(prediction)
        losses.append(loss)
        if verbose > 0 and not i % 50:
            print('{} / {}'.format(i, num_steps), end='\r')
    if verbose > 0:
        print('Performed {} gradient steps'.format(num_steps // stepsize))

    losses, predictions, indices = np.array(losses), swap_listaxes(predictions), np.array(indices)
    tg.__dict__.update(tmp_dict)

    return losses, predictions, indices
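# A sketch of how calculate_losses_from_generator might be called; `data_generator`
# and `keras_model` are assumed placeholder names for a compiled Keras model and its
# data generator, not objects defined in this code:
import numpy as np

losses, predictions, indices = calculate_losses_from_generator(
    data_generator, keras_model, stepsize=2, verbose=1)
mean_loss_per_output = np.mean(losses, axis=0)  # average loss for each model output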
def generate(self):
    model_path = os.path.expanduser(self.model_path)
    assert model_path.endswith(".h5"), "Keras model or weights must be a .h5 file."

    # Load model, or construct model and load weights.
    start = timer()
    num_anchors = len(self.anchors)
    num_classes = len(self.class_names)
    is_tiny_version = num_anchors == 6  # default setting
    try:
        self.yolo_model = load_model(model_path, compile=False)
    except:
        self.yolo_model = (tiny_yolo_body(Input(shape=(None, None, 3)), num_anchors // 2, num_classes)
                           if is_tiny_version else
                           yolo_body(Input(shape=(None, None, 3)), num_anchors // 3, num_classes))
        self.yolo_model.load_weights(self.model_path)  # make sure model, anchors and classes match
    else:
        assert self.yolo_model.layers[-1].output_shape[-1] == num_anchors / len(
            self.yolo_model.output) * (num_classes + 5), \
            "Mismatch between model and given anchor and class sizes"
    end = timer()
    print("{} model, anchors, and classes loaded in {:.2f}sec.".format(model_path, end - start))

    # Generate colors for drawing bounding boxes.
    if len(self.class_names) == 1:
        self.colors = ["GreenYellow"]
    else:
        hsv_tuples = [(x / len(self.class_names), 1.0, 1.0)
                      for x in range(len(self.class_names))]
        self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
        self.colors = list(
            map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
                self.colors))
        np.random.seed(10101)  # Fixed seed for consistent colors across runs.
        np.random.shuffle(self.colors)  # Shuffle colors to decorrelate adjacent classes.
        np.random.seed(None)  # Reset seed to default.

    # Generate output tensor targets for filtered bounding boxes.
    self.input_image_shape = K.placeholder(shape=(2, ))
    if self.gpu_num >= 2:
        self.yolo_model = multi_gpu_model(self.yolo_model, gpus=self.gpu_num)
    boxes, scores, classes = yolo_eval(
        self.yolo_model.output,
        self.anchors,
        len(self.class_names),
        self.input_image_shape,
        score_threshold=self.score,
        iou_threshold=self.iou,
    )
    return boxes, scores, classes