Example #1
    def __init__(self, ops, device):
        self.ops = ops
        self.img_size = ops.img_size
        self.classes = load_classes(parse_data_cfg(ops.data_cfg)['names'])
        self.num_classes = len(self.classes)

        if "tiny" in ops.detect_network:
            a_scalse = 416./ops.img_size
            anchors=[(10, 14), (23, 27), (37, 58), (81, 82), (135, 169), (344, 319)]
            anchors_new = [ (int(anchors[j][0]/a_scalse),int(anchors[j][1]/a_scalse)) for j in range(len(anchors)) ]
            model = Yolov3Tiny(self.num_classes,anchors = anchors_new)
            weights = ops.detect_model
            print('network : yolov3 - tiny')
        else:
            a_scalse = 416./ops.img_size
            anchors=[(10,13), (16,30), (33,23), (30,61), (62,45), (59,119), (116,90), (156,198), (373,326)]
            anchors_new = [ (int(anchors[j][0]/a_scalse),int(anchors[j][1]/a_scalse)) for j in range(len(anchors)) ]
            model = Yolov3(self.num_classes,anchors = anchors_new)
            weights = ops.detect_model
            print('network : yolov3')

        self.model = model
        yolo_model_param(self.model)  # show model parameters

        self.device = device
        self.use_cuda = torch.cuda.is_available()
        # Load weights
        if os.access(weights, os.F_OK):  # check that the model file exists
            self.model.load_state_dict(torch.load(weights, map_location=self.device)['model'])
        else:
            print('------- >>> error: model file does not exist')
            return
        self.model.to(self.device).eval()  # set the model to eval mode
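
The anchor handling is the same in every example: anchors tuned for a 416x416 input are divided by 416 / img_size so they match the configured input resolution. A minimal, self-contained sketch of that rescaling (the helper name rescale_anchors is illustrative, not from the repository):

def rescale_anchors(anchors, img_size, base_size=416.0):
    # anchors were tuned for base_size x base_size inputs; rescale them by
    # base_size / img_size, truncating to int exactly as the code above does
    scale = base_size / img_size
    return [(int(w / scale), int(h / scale)) for w, h in anchors]

tiny_anchors = [(10, 14), (23, 27), (37, 58), (81, 82), (135, 169), (344, 319)]
print(rescale_anchors(tiny_anchors, 320))  # smaller anchors for a 320x320 input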
Example #2
    def __init__(self,
                 data_cfg,
                 model_cfg,
                 model_path,
                 img_size,
                 conf_thres=0.35,
                 nms_thres=0.45):
        print('model_path : ', model_path)
        self.img_size = img_size
        self.conf_thres = conf_thres
        self.nms_thres = nms_thres
        self.classes = load_classes(parse_data_cfg(data_cfg)['names'])
        self.num_classes = len(self.classes)

        if "tiny" in model_cfg:
            print('YOLO V3 - tiny')
            a_scale = 416. / img_size
            anchors = [(10, 14), (23, 27), (37, 58), (81, 82), (135, 169),
                       (344, 319)]
            anchors_new = [(int(anchors[j][0] / a_scale),
                            int(anchors[j][1] / a_scale))
                           for j in range(len(anchors))]
            model = Yolov3Tiny(self.num_classes, anchors=anchors_new)
            weights = model_path
        else:
            print('YOLO V3')
            a_scale = 416. / img_size
            anchors = [(10, 13), (16, 30), (33, 23), (30, 61), (62, 45),
                       (59, 119), (116, 90), (156, 198), (373, 326)]
            anchors_new = [(int(anchors[j][0] / a_scale),
                            int(anchors[j][1] / a_scale))
                           for j in range(len(anchors))]
            model = Yolov3(self.num_classes, anchors=anchors_new)
            weights = model_path

        self.model = model
        # show_model_param(self.model)  # show model parameters

        self.device = select_device()  # select the device to run on
        self.use_cuda = torch.cuda.is_available()
        # Load weights
        if os.access(weights, os.F_OK):  # check that the model file exists
            self.model.load_state_dict(
                torch.load(weights, map_location=self.device)['model'])
        else:
            print('------- >>> error: model file does not exist')
            return
        self.model.to(self.device).eval()  # set the model to eval mode
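
Both loaders expect a checkpoint saved as a dict whose 'model' key holds the state_dict, which is exactly what the training loops below write with torch.save. A minimal sketch of that round trip, using a stand-in nn.Linear in place of Yolov3 / Yolov3Tiny:

import torch
import torch.nn as nn

net = nn.Linear(4, 2)  # stand-in for Yolov3 / Yolov3Tiny
chkpt = {'epoch': 0, 'best_loss': float('inf'),
         'model': net.state_dict(), 'optimizer': None}
torch.save(chkpt, 'demo.pt')

# mirrors: model.load_state_dict(torch.load(weights, map_location=device)['model'])
net.load_state_dict(torch.load('demo.pt', map_location='cpu')['model'])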
Example #3
def train(data_cfg='cfg/voc.data', accumulate=1):
    device = select_device()
    # Config
    get_data_cfg = parse_data_cfg(data_cfg)  # returns the training configuration as a dict

    gpus = get_data_cfg['gpus']
    num_workers = int(get_data_cfg['num_workers'])
    cfg_model = get_data_cfg['cfg_model']
    train_path = get_data_cfg['train']
    valid_path = get_data_cfg['valid']
    num_classes = int(get_data_cfg['classes'])
    finetune_model = get_data_cfg['finetune_model']
    batch_size = int(get_data_cfg['batch_size'])
    img_size = int(get_data_cfg['img_size'])
    multi_scale = get_data_cfg['multi_scale']
    epochs = int(get_data_cfg['epochs'])
    lr_step = str(get_data_cfg['lr_step'])

    multi_scale = (multi_scale == 'True')

    print('data_cfg            : ', data_cfg)
    print('voc.data config len : ', len(get_data_cfg))
    print('gpus             : ', gpus)
    print('num_workers      : ', num_workers)
    print('model            : ', cfg_model)
    print('finetune_model   : ', finetune_model)
    print('train_path       : ', train_path)
    print('valid_path       : ', valid_path)
    print('num_classes      : ', num_classes)
    print('batch_size       : ', batch_size)
    print('img_size         : ', img_size)
    print('multi_scale      : ', multi_scale)
    print('lr_step          : ', lr_step)
    # load model
    if "tiny" in cfg_model:
        a_scale = 416. / img_size
        anchors = [(10, 14), (23, 27), (37, 58), (81, 82), (135, 169),
                   (344, 319)]
        anchors_new = [(int(anchors[j][0] / a_scale),
                        int(anchors[j][1] / a_scale))
                       for j in range(len(anchors))]
        print('old anchors : ', anchors)
        model = Yolov3Tiny(num_classes, anchors=anchors_new)
        weights = './weights-yolov3-tiny/'
    else:
        a_scale = 416. / img_size
        anchors = [(10, 13), (16, 30), (33, 23), (30, 61), (62, 45), (59, 119),
                   (116, 90), (156, 198), (373, 326)]
        anchors_new = [(int(anchors[j][0] / a_scale),
                        int(anchors[j][1] / a_scale))
                       for j in range(len(anchors))]
        model = Yolov3(num_classes, anchors=anchors_new)
        weights = './weights-yolov3/'
    # create the directory for saving model weights
    if not os.path.exists(weights):
        os.mkdir(weights)

    latest = weights + 'latest.pt'
    best = weights + 'best.pt'
    # Optimizer
    lr0 = 0.001  # initial learning rate
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=lr0,
                                momentum=0.9,
                                weight_decay=0.0005)

    start_epoch = 0
    model = model.to(device)

    print(finetune_model)

    if os.access(finetune_model, os.F_OK):
        print(
            '\n/************************** load_model *************************/'
        )
        print(finetune_model)
        load_model(model, torch.load(finetune_model))
    else:
        print('finetune_model not exist !')

    milestones = [int(i) for i in lr_step.split(",")]
    print('milestones : ', milestones)
    scheduler = torch.optim.lr_scheduler.MultiStepLR(
        optimizer,
        milestones=milestones,
        gamma=0.1,
        last_epoch=start_epoch - 1)

    # Initialize distributed training
    if torch.cuda.device_count() > 1:
        dist.init_process_group(backend=opt.backend,
                                init_method=opt.dist_url,
                                world_size=opt.world_size,
                                rank=opt.rank)
        model = torch.nn.parallel.DistributedDataParallel(model)

    # Dataset
    print('multi_scale : ', multi_scale)
    dataset = LoadImagesAndLabels(train_path,
                                  batch_size=batch_size,
                                  img_size=img_size,
                                  augment=True,
                                  multi_scale=multi_scale)
    print('--------------->>> image num : ', len(dataset))
    # Dataloader
    dataloader = DataLoader(dataset,
                            batch_size=batch_size,
                            num_workers=num_workers,
                            shuffle=True,
                            pin_memory=False,
                            drop_last=False,
                            collate_fn=dataset.collate_fn)

    # Start training
    t = time.time()
    model_info(model)
    nB = len(dataloader)
    n_burnin = min(round(nB / 5 + 1), 1000)  # burn-in batches

    best_loss = float('inf')
    test_loss = float('inf')

    for epoch in range(start_epoch, epochs):

        print('')
        model.train()

        scheduler.step()

        mloss = defaultdict(float)  # mean loss
        for i, (imgs, targets, img_path_, _) in enumerate(dataloader):
            multi_size = imgs.size()
            imgs = imgs.to(device)
            targets = targets.to(device)

            nt = len(targets)
            if nt == 0:  # if no targets continue
                continue

            # SGD burn-in
            if epoch == 0 and i <= n_burnin:
                lr = lr0 * (i / n_burnin)**4
                for x in optimizer.param_groups:
                    x['lr'] = lr

            # Run model
            pred = model(imgs)

            # Build targets
            target_list = build_targets(model, targets)

            # Compute loss
            loss, loss_dict = compute_loss(pred, target_list)

            # Compute gradient
            loss.backward()

            # Accumulate gradient for x batches before optimizing
            if (i + 1) % accumulate == 0 or (i + 1) == nB:
                optimizer.step()
                optimizer.zero_grad()

            # Running epoch-means of tracked metrics
            for key, val in loss_dict.items():
                mloss[key] = (mloss[key] * i + val) / (i + 1)

            print(
                'Epoch {:3d}/{:3d}, Batch {:6d}/{:6d}, Img_size {}x{}, nTargets {}, lr {:.6f}, loss: xy {:.2f}, wh {:.2f}, '
                'conf {:.2f}, cls {:.2f}, total {:.2f}, time {:.3f}s'.format(
                    epoch, epochs - 1, i, nB - 1, multi_size[2], multi_size[3],
                    nt,
                    scheduler.get_lr()[0], mloss['xy'], mloss['wh'],
                    mloss['conf'], mloss['cls'], mloss['total'],
                    time.time() - t),
                end='\r')

            s = ('%8s%12s' + '%10.3g' * 7) % (
                '%g/%g' % (epoch, epochs - 1), '%g/%g' %
                (i, nB - 1), mloss['xy'], mloss['wh'], mloss['conf'],
                mloss['cls'], mloss['total'], nt, time.time() - t)
            t = time.time()

        if epoch % 5 == 0 and epoch > 0:
            # Calculate mAP
            print('\n')
            with torch.no_grad():
                print("-------" * 5 + "testing" + "-------" * 5)
                results = test.test(cfg_model,
                                    data_cfg,
                                    batch_size=batch_size,
                                    img_size=img_size,
                                    model=model)
            # Update best loss
            test_loss = results[4]
            if test_loss < best_loss:
                best_loss = test_loss

        if True:
            # Create checkpoint
            chkpt = {
                'epoch':
                epoch,
                'best_loss':
                best_loss,
                'model':
                model.module.state_dict()
                if type(model) is nn.parallel.DistributedDataParallel else
                model.state_dict(),
                'optimizer':
                optimizer.state_dict()
            }

            # Save latest checkpoint
            torch.save(chkpt, latest)

            # Save best checkpoint
            if best_loss == test_loss and epoch % 5 == 0:
                torch.save(chkpt, best)

            # Save a backup checkpoint every 5 epochs (optional)
            if epoch > 0 and epoch % 5 == 0:
                torch.save(chkpt, weights + 'Detect%g.pt' % epoch)

            # Delete checkpoint
            del chkpt
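
During the first epoch the loop above ramps the learning rate from 0 to lr0 over the first n_burnin batches with a quartic curve, lr = lr0 * (i / n_burnin) ** 4. A quick numeric sketch of that warm-up:

lr0, n_burnin = 0.001, 1000
for i in (0, 250, 500, 750, 1000):
    lr = lr0 * (i / n_burnin) ** 4  # quartic burn-in, reaches lr0 at i == n_burnin
    print(i, lr)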
Example #4
def train(data_cfg='cfg/voc_coco.data', accumulate=1):
    device = select_device()
    # Configure run
    get_data_cfg = parse_data_cfg(data_cfg)  # returns the training configuration as a dict

    gpus = get_data_cfg['gpus']
    num_workers = int(get_data_cfg['num_workers'])
    cfg_model = get_data_cfg['cfg_model']
    train_path = get_data_cfg['train']
    valid_path = get_data_cfg['valid']
    num_classes = int(get_data_cfg['classes'])
    finetune_model = get_data_cfg['finetune_model']
    batch_size = int(get_data_cfg['batch_size'])
    img_size = int(get_data_cfg['img_size'])
    multi_scale = get_data_cfg['multi_scale']
    epochs = int(get_data_cfg['epochs'])
    lr_step = str(get_data_cfg['lr_step'])

    multi_scale = (multi_scale == 'True')

    print('data_cfg            : ', data_cfg)
    print('voc.data config len : ', len(get_data_cfg))
    print('gpus             : ', gpus)
    print('num_workers      : ', num_workers)
    print('model            : ', cfg_model)
    print('finetune_model   : ', finetune_model)
    print('train_path       : ', train_path)
    print('valid_path       : ', valid_path)
    print('num_classes      : ', num_classes)
    print('batch_size       : ', batch_size)
    print('img_size         : ', img_size)
    print('multi_scale      : ', multi_scale)
    print('lr_step          : ', lr_step)
    # load model
    if "-tiny" in cfg_model:
        model = Yolov3Tiny(num_classes)
        weights = './weights-yolov3-tiny/'
    else:
        model = Yolov3(num_classes)
        weights = './weights-yolov3/'
    # create the directory for saving model weights
    if not os.path.exists(weights):
        os.mkdir(weights)

    model = model.to(device)
    latest = weights + 'latest.pt'
    best = weights + 'best.pt'
    # Optimizer
    lr0 = 0.001  # initial learning rate
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=lr0,
                                momentum=0.9,
                                weight_decay=0.0005)

    start_epoch = 0
    best_loss = float('inf')
    test_loss = float('inf')

    if os.access(finetune_model, os.F_OK):  # load retrain/finetune_model
        print('loading yolo-v3 finetune_model ~~~~~~', finetune_model)
        not_load_filters = 3 * (80 + 5)  # voc: 3*(20+5), coco: 3*(80+5)=255
        chkpt = torch.load(finetune_model, map_location=device)
        model.load_state_dict(
            {
                k: v
                for k, v in chkpt['model'].items()
                if v.numel() > 1 and v.shape[0] != not_load_filters
            },
            strict=False)
        # model.load_state_dict(chkpt['model'])
        start_epoch = chkpt['epoch']
        if chkpt['optimizer'] is not None:
            optimizer.load_state_dict(chkpt['optimizer'])
            best_loss = chkpt['best_loss']

    # Set scheduler (reduce lr at epochs 218, 245, i.e. batches 400k, 450k); gamma is the multiplicative factor of lr decay
    milestones = [int(i) for i in lr_step.split(",")]
    print('milestones : ', milestones)
    scheduler = torch.optim.lr_scheduler.MultiStepLR(
        optimizer,
        milestones=milestones,
        gamma=0.1,
        last_epoch=start_epoch - 1)

    # Initialize distributed training
    if torch.cuda.device_count() > 1:
        dist.init_process_group(backend=opt.backend,
                                init_method=opt.dist_url,
                                world_size=opt.world_size,
                                rank=opt.rank)
        model = torch.nn.parallel.DistributedDataParallel(model)

    # Dataset
    print('multi_scale : ', multi_scale)
    dataset = LoadImagesAndLabels(train_path,
                                  batch_size=batch_size,
                                  img_size=img_size,
                                  augment=True,
                                  multi_scale=multi_scale)

    # Dataloader
    dataloader = DataLoader(dataset,
                            batch_size=batch_size,
                            num_workers=num_workers,
                            shuffle=True,
                            pin_memory=False,
                            drop_last=False,
                            collate_fn=dataset.collate_fn)

    # Start training
    t = time.time()
    model_info(model)
    nB = len(dataloader)
    n_burnin = min(round(nB / 5 + 1), 1000)  # burn-in batches

    for epoch in range(start_epoch, epochs):
        print()
        model.train()
        # Update scheduler
        scheduler.step()

        mloss = defaultdict(float)  # mean loss
        for i, (imgs, targets, img_path_, _) in enumerate(dataloader):
            multi_size = imgs.size()
            imgs = imgs.to(device)
            targets = targets.to(device)

            nt = len(targets)
            if nt == 0:  # if no targets continue
                continue

            # SGD burn-in
            if epoch == 0 and i <= n_burnin:
                lr = lr0 * (i / n_burnin)**4
                for x in optimizer.param_groups:
                    x['lr'] = lr

            # Run model
            pred = model(imgs)

            # Build targets
            target_list = build_targets(model, targets)

            # Compute loss
            loss, loss_dict = compute_loss(pred, target_list)

            # Compute gradient
            loss.backward()

            # Accumulate gradient for x batches before optimizing
            if (i + 1) % accumulate == 0 or (i + 1) == nB:
                optimizer.step()
                optimizer.zero_grad()

            # Running epoch-means of tracked metrics
            for key, val in loss_dict.items():
                mloss[key] = (mloss[key] * i + val) / (i + 1)

            print(
                'Epoch {:3d}/{:3d}, Batch {:6d}/{:6d}, Img_size {}x{}, nTargets {}, lr {:.6f}, loss: xy {:.2f}, wh {:.2f}, '
                'conf {:.2f}, cls {:.2f}, total {:.2f}, time {:.3f}s'.format(
                    epoch, epochs - 1, i, nB - 1, multi_size[2], multi_size[3],
                    nt,
                    scheduler.get_lr()[0], mloss['xy'], mloss['wh'],
                    mloss['conf'], mloss['cls'], mloss['total'],
                    time.time() - t))

            s = ('%8s%12s' + '%10.3g' * 7) % (
                '%g/%g' % (epoch, epochs - 1), '%g/%g' %
                (i, nB - 1), mloss['xy'], mloss['wh'], mloss['conf'],
                mloss['cls'], mloss['total'], nt, time.time() - t)
            t = time.time()

        if epoch % 10 == 0:
            # Calculate mAP
            print('\n')
            with torch.no_grad():
                print("-------" * 5 + "testing" + "-------" * 5)
                results = test.test(cfg_model,
                                    data_cfg,
                                    batch_size=batch_size,
                                    img_size=img_size,
                                    model=model)
            # Update best loss
            test_loss = results[4]
            if test_loss < best_loss:
                best_loss = test_loss

        if True:
            # Create checkpoint
            chkpt = {
                'epoch':
                epoch,
                'best_loss':
                best_loss,
                'model':
                model.module.state_dict()
                if type(model) is nn.parallel.DistributedDataParallel else
                model.state_dict(),
                'optimizer':
                optimizer.state_dict()
            }

            # Save latest checkpoint
            torch.save(chkpt, latest)

            # Save best checkpoint
            if best_loss == test_loss and epoch % 5 == 0:
                torch.save(chkpt, best)

            # Save a backup checkpoint every 5 epochs (optional)
            if epoch > 0 and epoch % 5 == 0:
                torch.save(chkpt, weights + 'backup%g.pt' % epoch)

            # Delete checkpoint
            del chkpt
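
The finetuning branch above keeps the backbone weights from a COCO checkpoint but skips every tensor whose leading dimension equals the COCO detection-head size, 3 * (80 + 5) = 255, so the head is re-initialized for the new class count. A small sketch of that filtering on a toy state dict (the tensor names here are made up for illustration):

import torch

not_load_filters = 3 * (80 + 5)  # 255 filters in the COCO detection head
chkpt_model = {
    'backbone.conv.weight': torch.zeros(64, 3, 3, 3),   # kept
    'head.conv.weight': torch.zeros(255, 1024, 1, 1),   # skipped (255 filters)
}
kept = {k: v for k, v in chkpt_model.items()
        if v.numel() > 1 and v.shape[0] != not_load_filters}
print(list(kept))  # ['backbone.conv.weight']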
Example #5
def detect(
        model_path,
        root_path,
        cfg,
        data_cfg,
        img_size=416,
        conf_thres=0.5,
        nms_thres=0.5,
):
    classes = load_classes(parse_data_cfg(data_cfg)['names'])
    num_classes = len(classes)
    # Initialize model
    if "-tiny" in cfg:
        model = Yolov3Tiny(num_classes)
        weights = model_path
    else:
        model = Yolov3(num_classes)
        weights = model_path

    show_model_param(model)  # show model parameters

    device = select_device()  # select the device to run on
    use_cuda = torch.cuda.is_available()
    # Load weights
    if os.access(weights, os.F_OK):  # check that the model file exists
        model.load_state_dict(torch.load(weights, map_location=device)['model'])
    else:
        print('error: model file does not exist')
        return False
    model.to(device).eval()  # set the model to eval mode

    colors = [(v // 32 * 64 + 64, (v // 8) % 4 * 64, v % 8 * 32) for v in range(1, num_classes + 1)][::-1]

    for img_name in os.listdir(root_path):
        img_path = root_path + img_name
        im0 = cv2.imread(img_path)
        print("---------------------")

        t = time.time()
        img = process_data(im0, img_size)
        if use_cuda:
            torch.cuda.synchronize()
        t1 = time.time()
        print("process time:", t1-t)
        img = torch.from_numpy(img).unsqueeze(0).to(device)

        pred, _ = model(img)  # run detection on the image
        if use_cuda:
            torch.cuda.synchronize()
        t2 = time.time()
        print("inference time:", t2-t1)
        detections = non_max_suppression(pred, conf_thres, nms_thres)[0] # nms
        if use_cuda:
            torch.cuda.synchronize()
        t3 = time.time()
        print("get res time:", t3-t2)
        if detections is None or len(detections) == 0:
            continue
        # Rescale boxes from 416 to true image size
        detections[:, :4] = scale_coords(img_size, detections[:, :4], im0.shape).round()
        result = []
        for res in detections:
            result.append((classes[int(res[-1])], float(res[4]), [int(res[0]), int(res[1]), int(res[2]), int(res[3])]))
        if use_cuda:
            torch.cuda.synchronize()
        s2 = time.time()
        print("detect time:", s2 - t)
        print(result)

        # Draw bounding boxes and labels of detections
        for *xyxy, conf, cls_conf, cls in detections:
            label = '%s %.2f' % (classes[int(cls)], conf)
            plot_one_box(xyxy, im0, label=label, color=colors[int(cls)])

        cv2.namedWindow('result',0)
        cv2.imshow("result", im0)
        key = cv2.waitKey(0)
        if key == 27:
            break
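
The per-class colour table in detect() is generated purely from the class index. A standalone sketch of the same expression, handy for previewing the BGR tuples it produces:

def class_colors(num_classes):
    # same arithmetic as in detect(): one BGR tuple per class, list reversed
    return [(v // 32 * 64 + 64, (v // 8) % 4 * 64, v % 8 * 32)
            for v in range(1, num_classes + 1)][::-1]

print(class_colors(3))  # [(64, 0, 96), (64, 0, 64), (64, 0, 32)]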
Example #6
def test(cfg,
         data_cfg,
         batch_size=16,
         img_size=416,
         iou_thres=0.5,
         conf_thres=0.3,
         nms_thres=0.5,
         model=None):
    # Configure run
    data_cfg = parse_data_cfg(data_cfg)
    nc = int(data_cfg['classes'])  # number of classes
    test_path = data_cfg['valid']  # path to test images
    names = load_classes(data_cfg['names'])  # class names

    if model is None:
        device = select_device()
        num_classes = nc
        # Initialize model
        if "-tiny" in cfg:
            model = Yolov3Tiny(num_classes).to(device)
            # weights = 'weights-yolov3-tiny/best.pt'
            weights = "./yolov3-tiny_coco.pt"
        else:
            model = Yolov3(num_classes).to(device)
            # weights = 'weights-yolov3/best.pt'
            weights = "./finetune-weight/yolov3_coco.pt"

        # Load weights
        model.load_state_dict(
            torch.load(weights, map_location=device)['model'])

        if torch.cuda.device_count() > 1:
            model = nn.DataParallel(model)
    else:
        device = next(model.parameters()).device  # get model device
    print("using device: {}".format(device))
    # Dataloader
    dataset = LoadImagesAndLabels(test_path,
                                  batch_size,
                                  img_size=img_size,
                                  augment=False)
    dataloader = DataLoader(dataset,
                            batch_size=batch_size,
                            num_workers=0,
                            pin_memory=False,
                            collate_fn=dataset.collate_fn)

    seen = 0
    model.eval()
    print(('%20s' + '%10s' * 6) %
          ('Class', 'Images', 'Targets', 'P', 'R', 'mAP', 'F1'))
    loss, p, r, f1, mp, mr, map, mf1 = 0., 0., 0., 0., 0., 0., 0., 0.
    jdict, stats, ap, ap_class = [], [], [], []
    for batch_i, (imgs, targets, paths,
                  shapes) in enumerate(tqdm(dataloader, desc='Computing mAP')):
        targets = targets.to(device)
        nt = len(targets)
        if nt == 0:  # if no targets continue
            continue
        imgs = imgs.to(device)
        # Run model
        inf_out, train_out = model(imgs)  # inference and training outputs

        # Build targets
        target_list = build_targets(model, targets)

        # Compute loss
        loss_i, _ = compute_loss(train_out, target_list)
        loss += loss_i.item()

        # Run NMS
        output = non_max_suppression(inf_out,
                                     conf_thres=conf_thres,
                                     nms_thres=nms_thres)
        # Statistics per image
        for si, pred in enumerate(output):
            labels = targets[targets[:, 0] == si, 1:]
            correct, detected = [], []
            tcls = torch.Tensor()
            seen += 1

            if pred is None:
                if len(labels):
                    tcls = labels[:, 0].cpu()  # target classes
                    stats.append(
                        (correct, torch.Tensor(), torch.Tensor(), tcls))
                continue

            # Append to pycocotools JSON dictionary

            if len(labels):
                # Extract target boxes as (x1, y1, x2, y2)
                tbox = xywh2xyxy(labels[:, 1:5]) * img_size  # target boxes
                tcls = labels[:, 0]  # target classes

                for *pbox, pconf, pcls_conf, pcls in pred:
                    if pcls not in tcls:
                        correct.append(0)
                        continue

                    # Best iou, index between pred and targets
                    iou, bi = bbox_iou(pbox, tbox).max(0)

                    # If iou > threshold and class is correct mark as correct
                    if iou > iou_thres and bi not in detected:
                        correct.append(1)
                        detected.append(bi)
                    else:
                        correct.append(0)
            else:
                # If no labels add number of detections as incorrect
                correct.extend([0] * len(pred))

            # Append Statistics (correct, conf, pcls, tcls)
            stats.append(
                (correct, pred[:, 4].cpu(), pred[:, 6].cpu(), tcls.cpu()))

    # Compute statistics
    stats_np = [np.concatenate(x, 0) for x in list(zip(*stats))]
    nt = np.bincount(stats_np[3].astype(np.int64),
                     minlength=nc)  # number of targets per class
    if len(stats_np):
        p, r, ap, f1, ap_class = ap_per_class(*stats_np)
        mp, mr, map, mf1 = p.mean(), r.mean(), ap.mean(), f1.mean()

    # Print results
    pf = '%20s' + '%10.3g' * 6  # print format
    print(pf % ('all', seen, nt.sum(), mp, mr, map, mf1), end='\n\n')

    # Print results per class
    if nc > 1 and len(stats_np):
        for i, c in enumerate(ap_class):
            print(pf % (names[c], seen, nt[c], p[i], r[i], ap[i], f1[i]))

    # Return results
    return mp, mr, map, mf1, loss
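
The per-class target count near the end of test() comes from np.bincount over the target-class column, padded with minlength=nc so classes with no ground-truth boxes still get a zero entry. A tiny sketch:

import numpy as np

nc = 4                                            # number of classes
tcls = np.array([0, 0, 2, 2, 2], dtype=np.int64)  # target classes of all boxes
print(np.bincount(tcls, minlength=nc))            # [2 0 3 0]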
Example #7
def detect(
    model_path,
    classify_model_path,
    label_path,
    root_path,
    cfg,
    data_cfg,
    img_size=416,
    conf_thres=0.5,
    nms_thres=0.5,
):
    classes = load_classes(parse_data_cfg(data_cfg)['names'])
    num_classes = len(classes)
    # Initialize model
    if "-tiny" in cfg:
        model = Yolov3Tiny(num_classes)
        weights = model_path
    else:
        model = Yolov3(num_classes)
        weights = model_path

    show_model_param(model)  # show model parameters

    device = select_device(False)  # select the device to run on

    classify_model, labels_dogs_list = Create_Classify_Model(
        device, classify_model_path, label_path)

    # Load weights
    if os.access(weights, os.F_OK):  # check that the model file exists
        model.load_state_dict(
            torch.load(weights, map_location=device)['model'])
    else:
        print('error: model file does not exist')
        return False
    model.to(device).eval()  # set the model to eval mode

    colors = [(v // 32 * 64 + 64, (v // 8) % 4 * 64, v % 8 * 32)
              for v in range(1, num_classes + 1)][::-1]
    use_cuda = torch.cuda.is_available()
    for img_name in os.listdir(root_path):
        img_path = root_path + img_name
        im0 = cv2.imread(img_path)
        im_c = cv2.imread(img_path)
        print("---------------------")

        t = time.time()
        img = process_data(im0, img_size)
        if use_cuda:
            torch.cuda.synchronize()
        t1 = time.time()
        print("process time:", t1 - t)
        img = torch.from_numpy(img).unsqueeze(0).to(device)

        pred, _ = model(img)
        if use_cuda:
            torch.cuda.synchronize()
        t2 = time.time()
        print("inference time:", t2 - t1)
        detections = non_max_suppression(pred, conf_thres, nms_thres)[0]
        if use_cuda:
            torch.cuda.synchronize()
        t3 = time.time()
        print("get res time:", t3 - t2)
        if detections is None or len(detections) == 0:
            continue
        # Rescale boxes from 416 to true image size
        detections[:, :4] = scale_coords(img_size, detections[:, :4],
                                         im0.shape).round()
        result = []
        for res in detections:
            result.append(
                (classes[int(res[-1])], float(res[4]),
                 [int(res[0]),
                  int(res[1]),
                  int(res[2]),
                  int(res[3])]))
        if use_cuda:
            torch.cuda.synchronize()
        s2 = time.time()
        print("detect time:", s2 - t)
        print(result)

        # Draw bounding boxes and labels of detections
        for *xyxy, conf, cls_conf, cls in detections:
            label = '%s %.2f' % (classes[int(cls)], conf)

            #-------------------------------------------------------------------
            plot_one_box(xyxy, im0, label=label, color=colors[int(cls)])

            x_1 = int(xyxy[0])
            y_1 = int(xyxy[1])
            x_2 = int(xyxy[2])
            y_2 = int(xyxy[3])
            #--------------------
            img_crop_ = cv2.resize(im_c[y_1:y_2, x_1:x_2, :], (224, 224),
                                   interpolation=cv2.INTER_CUBIC)
            img_crop_ = img_crop_.astype(np.float32)
            img_crop_ = prewhiten(img_crop_)

            img_crop_ = torch.from_numpy(img_crop_)
            img_crop_ = img_crop_.unsqueeze_(0)
            img_crop_ = img_crop_.permute(0, 3, 1, 2)

            if use_cuda:  #
                img_crop_ = img_crop_.cuda()  # (bs, 3, h, w)

            outputs = F.softmax(classify_model(img_crop_.float()), dim=1)

            outputs = outputs[0]
            outputx = outputs.cpu().detach().numpy()
            # print('output: ',output)
            max_index = np.argmax(outputx)

            scorex_ = outputx[max_index]
            label_dog_ = labels_dogs_list[max_index]

            print('label_dog_ : ', label_dog_)

            plot_one_box((x_1, y_1 + 20, x_2, y_2),
                         im0,
                         label=label_dog_ + '_' + '%.2f' % (scorex_),
                         color=colors[int(cls)])
            #-----------------------
            cv2.namedWindow('crop', 0)
            cv2.imshow('crop', im_c[y_1:y_2, x_1:x_2, :])

        cv2.namedWindow('result', 0)
        cv2.imshow("result", im0)
        key = cv2.waitKey(0)
        if key == 27:
            break
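
Each detected box is cropped, resized to 224x224, standardised (prewhiten is assumed here to be a per-image (x - mean) / std normalisation) and reordered from HWC to NCHW before being passed to the classifier. A self-contained sketch of that conversion, with a random array standing in for the crop:

import numpy as np
import torch

def prewhiten_sketch(x):
    # assumption: per-image standardisation, a common implementation of prewhiten
    return (x - x.mean()) / max(float(x.std()), 1e-6)

crop = np.random.randint(0, 255, (224, 224, 3)).astype(np.float32)  # stand-in crop
crop = prewhiten_sketch(crop)
tensor = torch.from_numpy(crop).unsqueeze(0).permute(0, 3, 1, 2)    # (1, 3, 224, 224)
print(tensor.shape)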