# ---- checkpoint pruning: keep at most 10 'stepXXXXXXXX*.h5' files ----
# NOTE(review): this chunk is truncated — `steps`/`path_dir` are initialized
# above this fragment, and the final `if` below has its body cut off.
names = []
for fname in path_dir:
    if fname[len(fname) - 2:len(fname)] == 'h5' and fname[0:4] == 'step':
        steps.append(int(fname[4:12]))
        names.append(fname)
if len(steps) > 10:
    oldest = steps.index(min(steps))
    os.remove('./weights/' + names[oldest])
logger.info('Save model to {}'.format(save_path))

# ==================== eval ====================
if iter_id % cfg.eval_iter == 0:
    # NOTE: `eval` here is presumably the project's COCO-eval helper
    # (it takes a Decode instance), shadowing the builtin — confirm.
    box_ap = eval(_decode, val_images, cfg.val_pre_path, cfg.val_path,
                  cfg.eval_batch_size, _clsid2catid, cfg.draw_image)
    logger.info("box ap: %.3f" % (box_ap[0], ))
    # box_ap is used as the selection criterion for the best model
    ap = box_ap
    if ap[0] > best_ap_list[0]:
        best_ap_list[0] = ap[0]
        best_ap_list[1] = iter_id
        model.save('./weights/best_model.h5')   # Keras h5.
        # tf.saved_model.save(model, "./checkpoint")   # TF SavedModel.
    logger.info("Best test ap: {}, in iter: {}".format(
        best_ap_list[0], best_ap_list[1]))

# ==================== exit ====================
if iter_id == cfg.max_iters:
    pass  # NOTE(review): body truncated in this chunk — the sibling variants log 'Done.' and exit(0); confirm.
# NOTE(review): this chunk is truncated — the enclosing
# `for name in path_dir:` loop and its 'stepXXXXXXXX*.h5' filename filter
# are cut off above; the first three statements belong inside that loop.
step = int(name[4:12])
steps.append(step)
names.append(name)
# keep at most 10 checkpoints: delete the oldest one
if len(steps) > 10:
    oldest = steps.index(min(steps))
    os.remove('./weights/' + names[oldest])
logger.info('Save model to {}'.format(save_path))

# ==================== eval ====================
if iter_id % cfg.train_cfg['eval_iter'] == 0:
    if cfg.use_ema:
        ema.apply()          # evaluate with the EMA weights
    ppyolo.eval()            # switch to evaluation mode
    head.set_dropblock(is_test=True)
    # NOTE: `eval` here is presumably the project's COCO-eval helper,
    # shadowing the builtin — confirm.
    box_ap = eval(_decode, val_images, cfg.val_pre_path, cfg.val_path,
                  cfg.eval_cfg['eval_batch_size'], _clsid2catid,
                  cfg.eval_cfg['draw_image'], cfg.eval_cfg['draw_thresh'])
    logger.info("box ap: %.3f" % (box_ap[0], ))
    ppyolo.train()           # switch back to training mode
    head.set_dropblock(is_test=False)
    # box_ap is used as the selection criterion for the best model
    ap = box_ap
    if ap[0] > best_ap_list[0]:
        best_ap_list[0] = ap[0]
        best_ap_list[1] = iter_id
        torch.save(ppyolo.state_dict(), './weights/best_model.pt')
    if cfg.use_ema:
        ema.restore()        # restore the raw (non-EMA) training weights
    logger.info("Best test ap: {}, in iter: {}".format(
        best_ap_list[0], best_ap_list[1]))
# ---- build the inference graph for the selected algorithm ----
if algorithm == 'YOLOv4':
    output_l, output_m, output_s = YOLOv4(inputs, num_classes, num_anchors,
                                          is_test=False, trainable=True)
elif algorithm == 'YOLOv3':
    backbone = Resnet50Vd()
    # keep_prob must be 1.0 so inference results are deterministic/consistent
    head = YOLOv3Head(keep_prob=1.0)
    yolov3 = YOLOv3(backbone, head)
    output_l, output_m, output_s = yolov3(inputs)
eval_fetch_list = [output_l, output_m, output_s]
eval_prog = eval_prog.clone(for_test=True)

# ---- executor + weights ----
gpu_id = int(os.environ.get('FLAGS_selected_gpus', 0))
place = fluid.CUDAPlace(gpu_id) if use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(startup_prog)
fluid.load(eval_prog, model_path, executor=exe)

_decode = Decode(algorithm, anchors, conf_thresh, nms_thresh, input_shape,
                 exe, eval_prog, all_classes)

# class-id -> COCO category-id mapping; identity map for custom datasets
_clsid2catid = copy.deepcopy(clsid2catid)
if num_classes != 80:   # a custom dataset rather than COCO
    _clsid2catid = {k: k for k in range(num_classes)}

# NOTE: `eval` is presumably the project's COCO-eval helper, shadowing the builtin.
box_ap = eval(_decode, eval_fetch_list, images, eval_pre_path, anno_file,
              eval_batch_size, _clsid2catid, draw_image)
if __name__ == '__main__':
    classes_path = 'data/coco_classes.txt'
    # model_path may be 'yolov4.h5', './weights/step00001000.h5', etc.
    model_path = 'yolov4.h5'
    # model_path = './weights/step00001000.h5'

    # a larger input_shape improves accuracy but slows inference down
    # input_shape = (320, 320)
    # input_shape = (416, 416)
    input_shape = (608, 608)

    # relative path of the validation images
    eval_pre_path = '../COCO/val2017/'
    anno_file = '../COCO/annotations/instances_val2017.json'
    # the annotation file is expected to be a single JSON line;
    # each line parsed overwrites `dataset`, so the last line wins
    with open(anno_file, 'r', encoding='utf-8') as f2:
        for line in f2:
            line = line.strip()
            dataset = json.loads(line)
            images = dataset['images']

    num_anchors = 3
    all_classes = get_classes(classes_path)
    num_classes = len(all_classes)

    # build the model and load the trained weights
    inputs = layers.Input(shape=(None, None, 3))
    yolo = YOLOv4(inputs, num_classes, num_anchors)
    yolo.load_weights(model_path, by_name=True)

    # 0.05 / 0.45 are the score and NMS-IoU thresholds
    _decode = Decode(0.05, 0.45, input_shape, yolo, all_classes)
    # NOTE: `eval` is presumably the project's COCO-eval helper, shadowing the builtin.
    box_ap = eval(_decode, images, eval_pre_path, anno_file)
continue img_anno = val_dataset.loadImgs(img_id)[0] images.append(img_anno) all_classes = get_classes(cfg.classes_path) num_classes = len(all_classes) # 创建模型 Backbone = select_backbone(cfg.backbone_type) backbone = Backbone(**cfg.backbone) Fpn = select_fpn(cfg.fpn_type) fpn = Fpn(**cfg.fpn) Head = select_head(cfg.head_type) head = Head(fcos_loss=None, nms_cfg=cfg.nms_cfg, **cfg.head) fcos = FCOS(backbone, fpn, head) if use_gpu: fcos = fcos.cuda() fcos.load_state_dict(torch.load(model_path)) fcos.eval( ) # 必须调用model.eval()来设置dropout和batch normalization layers在运行推理前,切换到评估模式。 _clsid2catid = copy.deepcopy(clsid2catid) if num_classes != 80: # 如果不是COCO数据集,而是自定义数据集 _clsid2catid = {} for k in range(num_classes): _clsid2catid[k] = k _decode = Decode(fcos, all_classes, use_gpu, cfg, for_test=False) box_ap = eval(_decode, images, eval_pre_path, anno_file, eval_batch_size, _clsid2catid, draw_image, draw_thresh)
# ---- checkpoint pruning: keep at most 10 'stepXXXXXXXX*.h5' files ----
steps = []
names = []
for fname in path_dir:
    # only files named like 'step<8-digit-iter>...h5' are checkpoints
    if fname.endswith('h5') and fname.startswith('step'):
        steps.append(int(fname[4:12]))
        names.append(fname)
if len(steps) > 10:
    oldest = steps.index(min(steps))
    os.remove('./weights/' + names[oldest])
logger.info('Save model to {}'.format(save_path))

# eval
if iter_id % eval_iter == 0:
    # NOTE: `eval` is presumably the project's COCO-eval helper,
    # shadowing the builtin — confirm.
    box_ap = eval(_decode, val_images, val_pre_path, val_path,
                  eval_batch_size, _clsid2catid, draw_image)
    logger.info("box ap: %.3f" % (box_ap[0], ))
    # box_ap is used as the selection criterion for the best model
    ap = box_ap
    if ap[0] > best_ap_list[0]:
        best_ap_list[0] = ap[0]
        best_ap_list[1] = iter_id
        model.save('./weights/best_model.h5')
    logger.info("Best test ap: {}, in iter: {}".format(
        best_ap_list[0], best_ap_list[1]))

# exit
if iter_id == max_iters:
    logger.info('Done.')
    exit(0)
num_classes = len(all_classes)

# ---- build the PPYOLO model from the config-selected components ----
Backbone = select_backbone(cfg.backbone_type)
backbone = Backbone(**cfg.backbone)
Head = select_head(cfg.head_type)
head = Head(yolo_loss=None, nms_cfg=cfg.nms_cfg, **cfg.head)
ppyolo = PPYOLO(backbone, head)
if use_gpu:
    ppyolo = ppyolo.cuda()
ppyolo.load_state_dict(torch.load(model_path))
# model.eval() must be called before inference so dropout and batch-norm
# layers switch to evaluation mode; otherwise results are inconsistent.
ppyolo.eval()

# class-id -> COCO category-id mapping; identity map for custom datasets
_clsid2catid = copy.deepcopy(clsid2catid)
if num_classes != 80:   # a custom dataset rather than COCO
    _clsid2catid = {k: k for k in range(num_classes)}

_decode = Decode(ppyolo, all_classes, use_gpu, cfg, for_test=False)
# NOTE: `eval` is presumably the project's COCO-eval helper, shadowing the
# builtin; `type='test_dev'` runs against the test-dev split.
eval(_decode, images, test_pre_path, anno_file, eval_batch_size,
     _clsid2catid, draw_image, draw_thresh, type='test_dev')
# ---- checkpoint pruning: keep at most 10 'stepXXXXXXXX*.h5' files ----
path_dir = os.listdir('./weights')
steps = []
names = []
for fname in path_dir:
    # only files named like 'step<8-digit-iter>...h5' are checkpoints
    if fname.endswith('h5') and fname.startswith('step'):
        steps.append(int(fname[4:12]))
        names.append(fname)
if len(steps) > 10:
    oldest = steps.index(min(steps))
    os.remove('./weights/' + names[oldest])
logger.info('Save model to {}'.format(save_path))

# eval
if iter_id % eval_iter == 0:
    # NOTE: `eval` is presumably the project's COCO-eval helper,
    # shadowing the builtin — confirm. (This variant passes no
    # clsid-to-catid mapping.)
    box_ap = eval(_decode, val_images, val_pre_path, val_path,
                  eval_batch_size, draw_image)
    logger.info("box ap: %.3f" % (box_ap[0], ))
    # box_ap is used as the selection criterion for the best model
    ap = box_ap
    if ap[0] > best_ap_list[0]:
        best_ap_list[0] = ap[0]
        best_ap_list[1] = iter_id
        model.save('./weights/best_model.h5')
    logger.info("Best test ap: {}, in iter: {}".format(
        best_ap_list[0], best_ap_list[1]))

# exit
if iter_id == max_iters:
    logger.info('Done.')
    exit(0)
# a larger input_shape improves accuracy but slows inference down
# input_shape = (320, 320)
# input_shape = (416, 416)
input_shape = (608, 608)

# score threshold and NMS IoU threshold used during validation
conf_thresh = 0.001
nms_thresh = 0.45

# whether to draw the validation images
draw_image = False

# batch size used during validation
eval_batch_size = 4

# relative path of the validation images
eval_pre_path = '../COCO/val2017/'
anno_file = '../COCO/annotations/instances_val2017.json'
# the annotation file is expected to be a single JSON line; each parsed
# line overwrites `dataset`, so the last line wins
with open(anno_file, 'r', encoding='utf-8') as f2:
    for line in f2:
        line = line.strip()
        dataset = json.loads(line)
        images = dataset['images']

num_anchors = 3
all_classes = get_classes(classes_path)
num_classes = len(all_classes)

# build the model and load the trained weights
inputs = layers.Input(shape=(None, None, 3))
yolo = YOLOv4(inputs, num_classes, num_anchors)
yolo.load_weights(model_path, by_name=True)

_decode = Decode(conf_thresh, nms_thresh, input_shape, yolo, all_classes)
# NOTE: `eval` is presumably the project's COCO-eval helper, shadowing the builtin.
box_ap = eval(_decode, images, eval_pre_path, anno_file, eval_batch_size,
              draw_image)