def create_yolo_model(num_classes, weight_path=None):
    print('Loading model...')
    yolo = yolo_body(num_classes)
    if weight_path:
        yolo.load_weight(weight_path)
        print('Loaded {} successfully!'.format(weight_path))
    return yolo
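# Usage sketch for the helper above. The class count and weight-file name are
# taken from the PyTorch inference script later in this section; they are
# assumptions about your setup, not requirements of create_yolo_model itself.
yolo = create_yolo_model(80, weight_path='yolov3_state_dict.pt')
yolo.eval()  # switch to inference mode before running detections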
def create_model(input_shape, anchors, num_classes, load_pretrained=True, freeze_body=2,
                 weights_path='model_data/yolo_weights.h5'):
    '''create the training model'''
    K.clear_session()  # get a new session
    image_input = Input(shape=(None, None, 3))
    h, w = input_shape
    num_anchors = len(anchors)

    # One y_true input per output scale (strides 32, 16 and 8).
    y_true = [Input(shape=(h//{0:32, 1:16, 2:8}[l], w//{0:32, 1:16, 2:8}[l],
                           num_anchors//3, num_classes+5)) for l in range(3)]

    model_body = yolo_body(image_input, num_anchors // 3, num_classes)
    print('Create YOLOv3 model with {} anchors and {} classes.'.format(
        num_anchors, num_classes))

    if load_pretrained:
        model_body.load_weights(weights_path, by_name=True, skip_mismatch=True)
        print('Load weights {}.'.format(weights_path))
        if freeze_body in [1, 2]:
            # Freeze darknet53 body or freeze all but 3 output layers.
            num = (185, len(model_body.layers) - 3)[freeze_body - 1]
            for i in range(num):
                model_body.layers[i].trainable = False
            print('Freeze the first {} layers of total {} layers.'.format(
                num, len(model_body.layers)))

    model_loss = Lambda(yolo_loss,
                        output_shape=(1, ),
                        name='yolo_loss',
                        arguments={
                            'anchors': anchors,
                            'num_classes': num_classes,
                            'ignore_thresh': 0.5
                        })([*model_body.output, *y_true])
    model = Model([model_body.input, *y_true], model_loss)

    return model
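# Minimal usage sketch in keras-yolo3 style. get_anchors()/get_classes(), the
# file paths, and the Adam import (keras.optimizers) are assumptions about the
# surrounding training script, not part of create_model above.
anchors = get_anchors('model_data/yolo_anchors.txt')
num_classes = len(get_classes('model_data/voc_classes.txt'))
model = create_model((416, 416), anchors, num_classes)
# The Lambda layer already returns the YOLO loss, so compile with a pass-through loss.
model.compile(optimizer=Adam(lr=1e-3),
              loss={'yolo_loss': lambda y_true, y_pred: y_pred})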
def generate(self):
    model_path = os.path.expanduser(self.model_path)
    assert model_path.endswith('.h5'), 'Keras model or weights must be a .h5 file.'

    # Load model, or construct model and load weights.
    num_anchors = len(self.anchors)
    num_classes = len(self.class_names)
    is_tiny_version = num_anchors == 6  # default setting
    try:
        self.yolo_model = load_model(model_path, compile=False)
    except:
        self.yolo_model = tiny_yolo_body(Input(shape=(None, None, 1)), num_anchors//2, num_classes) \
            if is_tiny_version else yolo_body(Input(shape=(None, None, 1)), num_anchors//3, num_classes)
        self.yolo_model.load_weights(self.model_path)  # make sure model, anchors and classes match
    else:
        assert self.yolo_model.layers[-1].output_shape[-1] == \
            num_anchors/len(self.yolo_model.output) * (num_classes + 5), \
            'Mismatch between model and given anchor and class sizes'

    print('{} model, anchors, and classes loaded.'.format(model_path))

    # Generate colors for drawing bounding boxes.
    hsv_tuples = [(x / len(self.class_names), 1., 1.)
                  for x in range(len(self.class_names))]
    self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
    self.colors = list(
        map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
            self.colors))
    np.random.seed(10101)  # Fixed seed for consistent colors across runs.
    np.random.shuffle(self.colors)  # Shuffle colors to decorrelate adjacent classes.
    np.random.seed(None)  # Reset seed to default.

    # Generate output tensor targets for filtered bounding boxes.
    self.input_image_shape = K.placeholder(shape=(2, ))
    if self.gpu_num >= 2:
        self.yolo_model = multi_gpu_model(self.yolo_model, gpus=self.gpu_num)
    boxes, scores, classes = yolo_eval(self.yolo_model.output, self.anchors,
                                       len(self.class_names), self.input_image_shape,
                                       score_threshold=0., iou_threshold=0.)
    return boxes, scores, classes
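# Sketch of how the tensors returned by generate() are typically evaluated per
# image (keras-yolo3 detect_image style). self.sess holding a tf.Session,
# self.boxes/self.scores/self.classes storing generate()'s return values,
# image being a PIL image and image_data its preprocessed array are all
# assumptions about the surrounding YOLO class.
out_boxes, out_scores, out_classes = self.sess.run(
    [self.boxes, self.scores, self.classes],
    feed_dict={
        self.yolo_model.input: image_data,
        self.input_image_shape: [image.size[1], image.size[0]],
        K.learning_phase(): 0
    })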
import torch
import torch.nn as nn
import torch.nn.functional as F
from yolo import model
import time
import cv2
import numpy as np
from yolo.utility import convert_yolo_outputs, convert_ground_truth, resize, get_input_data
import matplotlib.pyplot as plt

torch.cuda.empty_cache()

# Load the model and pretrained weights.
start1 = time.time()
yolo = model.yolo_body(80)
yolo.load_weight('yolov3_state_dict.pt')
yolo.eval()
yolo.cuda()
print('load time:', time.time() - start1)

# Preprocess the input image and run a forward pass.
start2 = time.time()
image = cv2.imread('messi.jpg')
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
resize_image, ratio = resize(image, (416, 416))
image_data = get_input_data(resize_image)
image_data = np.expand_dims(image_data, 0)
X = torch.from_numpy(image_data)
X = X.cuda()
with torch.no_grad():
    outputs = yolo(X)
print('processing time:', time.time() - start2)

# Convert the raw outputs to detections.
start3 = time.time()
with open('name.txt', 'r') as f:
    # Assumption: name.txt holds one class name per line.
    class_names = [line.strip() for line in f]
from yolo.generators.pascal import PascalVocGenerator
from yolo.model import yolo_body
# Assumed import path for this repo's evaluate() helper; adjust to your project layout.
from yolo.eval.pascal import evaluate
import os

os.environ['CUDA_VISIBLE_DEVICES'] = '1'

common_args = {'batch_size': 1, 'image_size': 416}
test_generator = PascalVocGenerator(
    'datasets/voc_test/VOC2007',
    'test',
    shuffle_groups=False,
    skip_truncated=False,
    skip_difficult=True,
    anchors_path='voc_anchors_416.txt',
    **common_args
)
model_path = 'pascal_18_6.4112_6.5125_0.8319_0.8358.h5'
num_classes = test_generator.num_classes()
model, prediction_model = yolo_body(num_classes=num_classes)
prediction_model.load_weights(model_path, by_name=True, skip_mismatch=True)
average_precisions = evaluate(test_generator, prediction_model, visualize=False)

# Compute per-class average precision and the overall mAP.
total_instances = []
precisions = []
for label, (average_precision, num_annotations) in average_precisions.items():
    print('{:.0f} instances of class'.format(num_annotations),
          test_generator.label_to_name(label),
          'with average precision: {:.4f}'.format(average_precision))
    total_instances.append(num_annotations)
    precisions.append(average_precision)
mean_ap = sum(precisions) / sum(x > 0 for x in total_instances)
def create_model(input_shape, anchors, num_classes, load_pretrained=True, freeze_body=2,
                 weights_path='model_data/yolo_weights.h5'):
    '''create the training model'''
    K.clear_session()  # get a new session
    image_input = Input(shape=(None, None, 3))
    h, w = input_shape
    num_anchors = len(anchors)

    y_true = [Input(shape=(h//{0:32, 1:16, 2:8}[l], w//{0:32, 1:16, 2:8}[l],
                           num_anchors//3, num_classes+5)) for l in range(3)]

    model_body = yolo_body(image_input, num_anchors // 3, num_classes)
    print('Create YOLOv3 model with {} anchors and {} classes.'.format(
        num_anchors, num_classes))

    if load_pretrained:
        model_body.load_weights(weights_path, by_name=True, skip_mismatch=True)
        print('Load weights {}.'.format(weights_path))
        if freeze_body in [1, 2]:
            # Freeze darknet53 body or freeze all but 3 output layers.
            num = (185, len(model_body.layers) - 3)[freeze_body - 1]
            for i in range(num):
                model_body.layers[i].trainable = False
            print('Freeze the first {} layers of total {} layers.'.format(
                num, len(model_body.layers)))

    # Get the outputs of the second-to-last layers and build a bottleneck model from them.
    out1 = model_body.layers[246].output
    out2 = model_body.layers[247].output
    out3 = model_body.layers[248].output
    bottleneck_model = Model([model_body.input, *y_true], [out1, out2, out3])

    # Build a last-layer model from the final layers of the YOLO model.
    in0 = Input(shape=bottleneck_model.output[0].shape[1:].as_list())
    in1 = Input(shape=bottleneck_model.output[1].shape[1:].as_list())
    in2 = Input(shape=bottleneck_model.output[2].shape[1:].as_list())
    last_out0 = model_body.layers[249](in0)
    last_out1 = model_body.layers[250](in1)
    last_out2 = model_body.layers[251](in2)
    model_last = Model(inputs=[in0, in1, in2], outputs=[last_out0, last_out1, last_out2])

    model_loss_last = Lambda(yolo_loss,
                             output_shape=(1, ),
                             name='yolo_loss',
                             arguments={
                                 'anchors': anchors,
                                 'num_classes': num_classes,
                                 'ignore_thresh': 0.5
                             })([*model_last.output, *y_true])
    last_layer_model = Model([in0, in1, in2, *y_true], model_loss_last)

    model_loss = Lambda(yolo_loss,
                        output_shape=(1, ),
                        name='yolo_loss',
                        arguments={
                            'anchors': anchors,
                            'num_classes': num_classes,
                            'ignore_thresh': 0.5
                        })([*model_body.output, *y_true])
    model = Model([model_body.input, *y_true], model_loss)

    return model, bottleneck_model, last_layer_model
        # Inside an evaluation callback's on_epoch_end: write each COCO metric
        # to TensorBoard and expose it in the Keras logs dict.
        summary_value.simple_value = result
        summary_value.tag = '{}. {}'.format(index + 1, coco_tag[index])
        self.tensorboard.writer.add_summary(summary, epoch)
        logs[coco_tag[index]] = result


if __name__ == '__main__':
    dataset_dir = '/home/adam/.keras/datasets/coco/2017_118_5'
    test_generator = CocoGenerator(
        anchors_path='yolo/yolo_anchors.txt',
        data_dir=dataset_dir,
        set_name='test-dev2017',
        shuffle_groups=False,
    )
    input_shape = (416, 416)
    model, prediction_model = yolo_body(test_generator.anchors, num_classes=80)
    model.load_weights('yolo/checkpoints/yolov3_weights.h5', by_name=True)
    coco_eval_stats = evaluate_coco(test_generator, model)
    coco_tag = [
        'AP @[ IoU=0.50:0.95 | area= all | maxDets=100 ]',
        'AP @[ IoU=0.50 | area= all | maxDets=100 ]',
        'AP @[ IoU=0.75 | area= all | maxDets=100 ]',
        'AP @[ IoU=0.50:0.95 | area= small | maxDets=100 ]',
        'AP @[ IoU=0.50:0.95 | area=medium | maxDets=100 ]',
        'AP @[ IoU=0.50:0.95 | area= large | maxDets=100 ]',
        'AR @[ IoU=0.50:0.95 | area= all | maxDets= 1 ]',
        'AR @[ IoU=0.50:0.95 | area= all | maxDets= 10 ]',
        'AR @[ IoU=0.50:0.95 | area= all | maxDets=100 ]',
        'AR @[ IoU=0.50:0.95 | area= small | maxDets=100 ]',
        'AR @[ IoU=0.50:0.95 | area=medium | maxDets=100 ]',
        'AR @[ IoU=0.50:0.95 | area= large | maxDets=100 ]',
    ]
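    # Sketch of how the tags above are usually paired with pycocotools results;
    # assumes coco_eval_stats is the 12-element COCOeval.stats array (or None
    # when evaluation produced no detections).
    if coco_eval_stats is not None:
        for index, result in enumerate(coco_eval_stats):
            print('{}: {:.4f}'.format(coco_tag[index], result))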
def main(args=None):
    # parse arguments
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)

    # optionally choose specific GPU
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    K.set_session(get_session())

    # create the generators
    train_generator, validation_generator = create_generators(args)
    num_classes = train_generator.num_classes()
    model, prediction_model = yolo_body(num_classes=num_classes)

    # create the model
    print('Loading model, this may take a second...')
    model.load_weights(args.snapshot, by_name=True, skip_mismatch=True)

    # freeze layers
    if args.freeze_body == 'darknet':
        for i in range(185):
            model.layers[i].trainable = False
    elif args.freeze_body == 'yolo':
        for i in range(len(model.layers) - 18):
            model.layers[i].trainable = False

    # compile model
    model.compile(
        loss={
            'cls_loss': lambda y_true, y_pred: y_pred,
            'regr_loss': lambda y_true, y_pred: y_pred,
        },
        optimizer=Adam(lr=1e-3)
        # optimizer=SGD(lr=1e-4, momentum=0.9, nesterov=True, decay=1e-4)
    )

    # print model summary
    # print(model.summary())

    # create the callbacks
    callbacks = create_callbacks(
        model,
        prediction_model,
        validation_generator,
        args,
    )

    if not args.compute_val_loss:
        validation_generator = None

    # start training
    return model.fit_generator(generator=train_generator,
                               steps_per_epoch=args.steps,
                               initial_epoch=0,
                               epochs=args.epochs,
                               verbose=1,
                               callbacks=callbacks,
                               workers=args.workers,
                               use_multiprocessing=args.multiprocessing,
                               max_queue_size=args.max_queue_size,
                               validation_data=validation_generator)
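# Standard entry point: running the script directly hands the command-line
# arguments to main(), which parses them via parse_args above.
if __name__ == '__main__':
    main()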