def build_model(cfg, loss):
    # define backbone
    Backbone = select_backbone(cfg.backbone_type)
    backbone = Backbone(**cfg.backbone)

    # define head
    Head = select_head(cfg.head_type)
    head = Head(yolo_loss=loss, is_train=True, nms_cfg=cfg.nms_cfg, **cfg.head)

    model = PPYOLO(backbone, head)

    return model
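
A hypothetical call site for build_model, pieced together from the other examples on this page; cfg, use_gpu and the loss construction are assumed to exist exactly as they do in the snippets below:

Loss = select_loss(cfg.yolo_loss_type)
yolo_loss = Loss(**cfg.yolo_loss)
model = build_model(cfg, yolo_loss)
if use_gpu:
    model = model.cuda()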
Example #2
    IouLoss = select_loss(cfg.iou_loss_type)
    iou_loss = IouLoss(**cfg.iou_loss)
    iou_aware_loss = None
    if cfg.head['iou_aware']:
        IouAwareLoss = select_loss(cfg.iou_aware_loss_type)
        iou_aware_loss = IouAwareLoss(**cfg.iou_aware_loss)
    Loss = select_loss(cfg.yolo_loss_type)
    yolo_loss = Loss(iou_loss=iou_loss,
                     iou_aware_loss=iou_aware_loss,
                     **cfg.yolo_loss)
    Head = select_head(cfg.head_type)
    head = Head(yolo_loss=yolo_loss,
                is_train=True,
                nms_cfg=cfg.nms_cfg,
                **cfg.head)
    ppyolo = PPYOLO(backbone, head)
    _decode = Decode(ppyolo, class_names, use_gpu, cfg, for_test=False)

    # Load weights
    if cfg.train_cfg['model_path'] is not None:
        # Load parameters, skipping those whose shapes do not match.
        load_weights(ppyolo, cfg.train_cfg['model_path'])

        strs = cfg.train_cfg['model_path'].split('step')
        if len(strs) == 2:
            iter_id = int(strs[1][:8])

        # Freeze the backbone to reduce VRAM usage; recommended for low-memory GPUs.
        backbone.freeze()

    if use_gpu:  # If a GPU is available, keep the model (weights included) in GPU memory
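
The project's load_weights (which, per the comment, skips parameters whose shapes do not match) is not shown in this snippet; a minimal PyTorch sketch of that behaviour, under an assumed helper name, could look like:

import torch

def load_weights_skip_mismatch(model, model_path):
    # Keep only checkpoint tensors whose name and shape match the current model.
    checkpoint = torch.load(model_path, map_location='cpu')
    model_state = model.state_dict()
    matched = {k: v for k, v in checkpoint.items()
               if k in model_state and v.shape == model_state[k].shape}
    model_state.update(matched)
    model.load_state_dict(model_state)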
Example #3
    # Step id; no need to set it manually, it is read automatically.
    iter_id = 0

    # Build the model
    Backbone = select_backbone(cfg.backbone_type)
    backbone = Backbone(**cfg.backbone)
    IouLoss = select_loss(cfg.iou_loss_type)
    iou_loss = IouLoss(**cfg.iou_loss)
    IouAwareLoss = select_loss(cfg.iou_aware_loss_type)
    iou_aware_loss = IouAwareLoss(**cfg.iou_aware_loss)
    Loss = select_loss(cfg.yolo_loss_type)
    yolo_loss = Loss(iou_loss=iou_loss, iou_aware_loss=iou_aware_loss, **cfg.yolo_loss)
    Head = select_head(cfg.head_type)
    head = Head(yolo_loss=yolo_loss, is_train=True, nms_cfg=cfg.nms_cfg, **cfg.head)
    ppyolo = PPYOLO(backbone, head, cfg.ema_decay)
    _decode = Decode(ppyolo, class_names, use_gpu, cfg, for_test=False)

    # Load weights
    if cfg.train_cfg['model_path'] is not None:
        # Load parameters, skipping those whose shapes do not match.
        load_weights(ppyolo, cfg.train_cfg['model_path'])

        strs = cfg.train_cfg['model_path'].split('step')
        if len(strs) == 2:
            iter_id = int(strs[1][:8])

        # Freeze the backbone to reduce VRAM usage; recommended for low-memory GPUs.
        backbone.freeze()

    if cfg.use_ema:
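
This example passes cfg.ema_decay to PPYOLO and then branches on cfg.use_ema; the project's EMA code itself is not shown. As a rough sketch of the underlying idea (an assumed helper, not the project's implementation), exponential moving averaging keeps a shadow copy of the weights that is updated after every training step:

import copy
import torch

class ModelEMA:
    def __init__(self, model, decay=0.9998):
        self.decay = decay
        self.ema = copy.deepcopy(model).eval()   # shadow model holding the averaged weights
        for p in self.ema.parameters():
            p.requires_grad_(False)

    @torch.no_grad()
    def update(self, model):
        # ema = decay * ema + (1 - decay) * current
        ema_state = self.ema.state_dict()
        for k, v in model.state_dict().items():
            if v.dtype.is_floating_point:
                ema_state[k].mul_(self.decay).add_(v, alpha=1.0 - self.decay)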
Example #4
import torch
import paddle.fluid as fluid


def load_weights(path):
    state_dict = fluid.io.load_program_state(path)
    return state_dict

state_dict = load_weights(model_path)
print('============================================================')




# Build the model
Backbone = select_backbone(cfg.backbone_type)
backbone = Backbone(**cfg.backbone)
Head = select_head(cfg.head_type)
head = Head(yolo_loss=None, **cfg.head)
ppyolo = PPYOLO(backbone, head)
if use_gpu:
    ppyolo = ppyolo.cuda()
ppyolo.eval()  # model.eval() must be called before inference so that dropout and batch normalization layers switch to evaluation mode; otherwise the results will be inconsistent.

print('\nCopying...')


def copy_conv_bn(conv_unit, w, scale, offset, m, v):
    conv_unit.conv.weight.data = torch.Tensor(w).cuda()
    conv_unit.bn.weight.data = torch.Tensor(scale).cuda()
    conv_unit.bn.bias.data = torch.Tensor(offset).cuda()
    conv_unit.bn.running_mean.data = torch.Tensor(m).cuda()
    conv_unit.bn.running_var.data = torch.Tensor(v).cuda()

def copy_conv(conv_layer, w, b):
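    # The snippet is cut off here; a plausible body, assumed by analogy with
    # copy_conv_bn above rather than taken from the project, copies a plain
    # convolution's weight and bias:
    conv_layer.weight.data = torch.Tensor(w).cuda()
    conv_layer.bias.data = torch.Tensor(b).cuda()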
Example #5
        ins_anno_ids = val_dataset.getAnnIds(imgIds=img_id, iscrowd=False)   # Get the ids of all annotations for this image
        if len(ins_anno_ids) == 0:
            continue
        img_anno = val_dataset.loadImgs(img_id)[0]
        images.append(img_anno)

    all_classes = get_classes(cfg.classes_path)
    num_classes = len(all_classes)


    # Build the model
    Backbone = select_backbone(cfg.backbone_type)
    backbone = Backbone(**cfg.backbone)
    Head = select_head(cfg.head_type)
    head = Head(yolo_loss=None, nms_cfg=cfg.nms_cfg, **cfg.head)
    ppyolo = PPYOLO(backbone, head)
    if use_gpu:
        ppyolo = ppyolo.cuda()
    ppyolo.load_state_dict(torch.load(model_path))
    ppyolo.eval()  # model.eval() must be called before inference so that dropout and batch normalization layers switch to evaluation mode; otherwise the results will be inconsistent.

    _clsid2catid = copy.deepcopy(clsid2catid)
    if num_classes != 80:   # A custom dataset rather than the COCO dataset
        _clsid2catid = {}
        for k in range(num_classes):
            _clsid2catid[k] = k

    _decode = Decode(ppyolo, all_classes, use_gpu, cfg, for_test=False)
    box_ap = eval(_decode, images, eval_pre_path, anno_file, eval_batch_size, _clsid2catid, draw_image, draw_thresh)
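
The val_dataset used in this example exposes the pycocotools COCO API (getAnnIds, loadImgs); a minimal sketch of the setup that is assumed but not shown in this snippet:

from pycocotools.coco import COCO

val_dataset = COCO(anno_file)        # anno_file: path to the COCO-format annotation json
img_ids = val_dataset.getImgIds()    # the image ids iterated over in the loop above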

Example #6
            dataset_text += line
        eval_dataset = json.loads(dataset_text)
        categories = eval_dataset['categories']
        for clsid, cate_dic in enumerate(categories):
            catid = cate_dic['id']
            cname = cate_dic['name']
            _catid2clsid[catid] = clsid
            _clsid2catid[clsid] = catid
            _clsid2cname[clsid] = cname
    class_names = []
    num_classes = len(_clsid2cname.keys())
    for clsid in range(num_classes):
        class_names.append(_clsid2cname[clsid])


    # Build the model
    Backbone = select_backbone(cfg.backbone_type)
    backbone = Backbone(**cfg.backbone)
    Head = select_head(cfg.head_type)
    head = Head(yolo_loss=None, nms_cfg=cfg.nms_cfg, **cfg.head)
    model = PPYOLO(backbone, head)
    if use_gpu:
        model = model.cuda()
    model.load_state_dict(torch.load(model_path))
    model.eval()  # model.eval() must be called before inference so that dropout and batch normalization layers switch to evaluation mode.
    head.set_dropblock(is_test=True)

    _decode = Decode(model, class_names, use_gpu, cfg, for_test=False)
    box_ap = eval(_decode, images, eval_pre_path, anno_file, eval_batch_size, _clsid2catid, draw_image, draw_thresh)
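
One caveat with loading the checkpoint as above: torch.load restores tensors to the device they were saved from, so on a machine without a GPU a checkpoint saved during GPU training needs an explicit map_location. A common variant (a sketch, not necessarily what this project does):

device = torch.device('cuda' if use_gpu else 'cpu')
model.load_state_dict(torch.load(model_path, map_location=device))
model.to(device)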

Example #7
    IouLoss = select_loss(cfg.iou_loss_type)
    iou_loss = IouLoss(**cfg.iou_loss)
    iou_aware_loss = None
    if cfg.head['iou_aware']:
        IouAwareLoss = select_loss(cfg.iou_aware_loss_type)
        iou_aware_loss = IouAwareLoss(**cfg.iou_aware_loss)
    Loss = select_loss(cfg.yolo_loss_type)
    yolo_loss = Loss(iou_loss=iou_loss,
                     iou_aware_loss=iou_aware_loss,
                     **cfg.yolo_loss)
    Head = select_head(cfg.head_type)
    head = Head(yolo_loss=yolo_loss,
                is_train=True,
                nms_cfg=cfg.nms_cfg,
                **cfg.head)
    model = PPYOLO(backbone, head)
    _decode = Decode(model, class_names, use_gpu, cfg, for_test=False)

    # Load weights
    if cfg.train_cfg['model_path'] is not None:
        # Load parameters, skipping those whose shapes do not match.
        load_weights(model, cfg.train_cfg['model_path'])

        strs = cfg.train_cfg['model_path'].split('step')
        if len(strs) == 2:
            iter_id = int(strs[1][:8])

    # Freeze the backbone to reduce VRAM usage; recommended for low-memory GPUs.
    backbone.freeze()

    if use_gpu:  # If a GPU is available, keep the model (weights included) in GPU memory
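
backbone.freeze() itself is project-specific and not shown on this page; in PyTorch, freezing usually amounts to disabling gradients for the backbone's parameters, which is what reduces memory during training. A minimal sketch of that idea (an assumption, not the project's implementation):

def freeze_module(module):
    # No gradients (and no optimizer state) are kept for these parameters.
    for p in module.parameters():
        p.requires_grad_(False)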