Example #1
def run(img_dir, output_dir, img_size, num_classes, weights, conf_thres,
        nms_thres, show):
    shutil.rmtree(output_dir, ignore_errors=True)
    os.makedirs(output_dir, exist_ok=True)
    model = YOLOV3(num_classes, img_size)
    state_dict = torch.load(weights, map_location='cpu')
    model.load_state_dict(state_dict['model'])
    model = model.to(device)
    model.eval()
    colors = [[random.randint(0, 255) for _ in range(3)]
              for _ in range(num_classes)]
    names = [n for n in os.listdir(img_dir) if osp.splitext(n)[1] in IMG_EXT]
    names.sort()
    for name in tqdm(names):
        img = cv2.imread(osp.join(img_dir, name))
        det = inference(model, [img], img_size, conf_thres, nms_thres)[0]
        det_txt = []
        # Write results
        for *xyxy, conf, _, cls in det:
            det_txt.append(' '.join(['%g'] * 6) % (*xyxy, cls, conf))
            if show:  # Add bbox to image
                label = '%d %.2f' % (int(cls), conf)
                plot_one_box(xyxy, img, label=label, color=colors[int(cls)])
        with open(osp.join(output_dir,
                           osp.splitext(name)[0] + '.txt'), 'w') as f:
            f.write('\n'.join(det_txt))
        # Stream results
        if show:
            cv2.imshow('yolo', img)
            cv2.waitKey(1)
        # Save results (image with detections)
        cv2.imwrite(osp.join(output_dir, name), img)
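
For reference, a minimal self-contained sketch of the label format written above (detection values invented for illustration): each row of det is assumed to be [x1, y1, x2, y2, conf, cls_conf, cls], and every line written to the .txt file is "x1 y1 x2 y2 cls conf".

# Hypothetical detection row: x1, y1, x2, y2, conf, cls_conf, cls.
det = [[48.0, 60.5, 210.0, 330.25, 0.91, 0.88, 16.0]]
for *xyxy, conf, _, cls in det:
    line = ' '.join(['%g'] * 6) % (*xyxy, cls, conf)
    print(line)  # -> 48 60.5 210 330.25 16 0.91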
Example #2
def export2caffe(weights, num_classes, img_size):
    # Signal export mode to the model code via an environment variable.
    os.environ['MODEL_EXPORT'] = '1'
    model = YOLOV3(num_classes)
    weights = torch.load(weights, map_location='cpu')
    model.load_state_dict(weights['model'])
    model.eval()
    fuse(model)  # fuse layers (e.g. Conv + BatchNorm) before export
    name = 'RYOLOV3'
    # NCHW dummy input; img_size is taken as (width, height).
    dummy_input = torch.ones([1, 3, img_size[1], img_size[0]])
    pytorch2caffe.trans_net(model, dummy_input, name)
    pytorch2caffe.save_prototxt('{}.prototxt'.format(name))
    pytorch2caffe.save_caffemodel('{}.caffemodel'.format(name))
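
A hedged usage sketch for the export helper above; the checkpoint path and class count are placeholders. Since the dummy input is built as [1, 3, img_size[1], img_size[0]], img_size is expected as (width, height).

# Placeholder arguments; point `weights` at a real checkpoint of yours.
export2caffe(weights='weights/best.pt',   # hypothetical checkpoint path
             num_classes=80,
             img_size=(416, 416))         # (width, height)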
Example #3
def train(data_dir, epochs, img_size, batch_size, accumulate, lr, adam, resume,
          weights, num_workers, multi_scale, rect, mixed_precision, notest,
          nosave):
    train_coco = osp.join(data_dir, 'train.json')
    val_coco = osp.join(data_dir, 'val.json')

    train_data = CocoDataset(train_coco,
                             img_size=img_size,
                             multi_scale=multi_scale,
                             rect=rect,
                             with_label=True,
                             mosaic=False)
    train_loader = DataLoader(
        train_data,
        batch_size=batch_size,
        # Shuffle only in the non-distributed case; when a DistributedSampler
        # is supplied below, the DataLoader itself must not shuffle.
        shuffle=not dist.is_initialized(),
        sampler=DistributedSampler(train_data, dist.get_world_size(),
                                   dist.get_rank())
        if dist.is_initialized() else None,
        pin_memory=True,
        num_workers=num_workers,
        collate_fn=CocoDataset.collate_fn,
    )
    train_fetcher = Fetcher(train_loader, train_data.post_fetch_fn)
    if not notest:
        val_data = CocoDataset(val_coco,
                               img_size=img_size,
                               augments=None,
                               rect=rect)
        val_loader = DataLoader(
            val_data,
            batch_size=batch_size,
            shuffle=not dist.is_initialized(),
            sampler=DistributedSampler(val_data, dist.get_world_size(),
                                       dist.get_rank())
            if dist.is_initialized() else None,
            pin_memory=True,
            num_workers=num_workers,
            collate_fn=CocoDataset.collate_fn,
        )
        val_fetcher = Fetcher(val_loader, post_fetch_fn=val_data.post_fetch_fn)

    model = YOLOV3(len(train_data.classes))

    trainer = Trainer(model,
                      train_fetcher,
                      loss_fn=compute_loss,
                      workdir='weights',
                      accumulate=accumulate,
                      adam=adam,
                      lr=lr,
                      weights=weights,
                      resume=resume,
                      mixed_precision=mixed_precision)
    # Train for the requested number of epochs; optionally evaluate after each
    # epoch and remember the best mAP so the best checkpoint can be flagged.
    while trainer.epoch < epochs:
        trainer.step()
        best = False
        if not notest:
            metrics = test(trainer.model, val_fetcher, conf_thres=0.1)
            if metrics > trainer.metrics:
                best = True
                print('save best, mAP: %g' % metrics)
                trainer.metrics = metrics
        if not nosave:
            trainer.save(best)
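
A hedged call into train() with placeholder values: the keyword names mirror the signature above, while the dataset path, schedule, and batch settings are illustrative only (data_dir just needs to contain the train.json and val.json referenced at the top of the function).

train(data_dir='data/coco',      # hypothetical root holding train.json / val.json
      epochs=100,
      img_size=416,
      batch_size=16,
      accumulate=4,              # gradient-accumulation steps per optimizer update (assumed)
      lr=1e-3,
      adam=False,
      resume=False,
      weights='',                # optional pretrained checkpoint to start from
      num_workers=4,
      multi_scale=False,
      rect=False,
      mixed_precision=False,
      notest=False,
      nosave=False)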
Example #4
    parser.add_argument('--conf-thres',
                        type=float,
                        default=0.1,
                        help='object confidence threshold')
    parser.add_argument('--nms-thres',
                        type=float,
                        default=0.5,
                        help='iou threshold for non-maximum suppression')
    opt = parser.parse_args()

    val_data = CocoDataset(opt.val,
                           img_size=opt.img_size,
                           augments=None,
                           rect=opt.rect)
    val_loader = DataLoader(
        val_data,
        batch_size=opt.batch_size,
        pin_memory=True,
        num_workers=opt.num_workers,
        collate_fn=CocoDataset.collate_fn,
    )
    val_fetcher = Fetcher(val_loader, post_fetch_fn=val_data.post_fetch_fn)
    model = YOLOV3(len(val_data.classes))
    if opt.weights:
        state_dict = torch.load(opt.weights, map_location='cpu')
        model.load_state_dict(state_dict['model'])
    metrics = test(model,
                   val_fetcher,
                   conf_thres=opt.conf_thres,
                   nms_thres=opt.nms_thres)
    print('metrics: %8g' % metrics)
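
The fragment begins partway through the option definitions, so only --conf-thres and --nms-thres are visible here; the other opt.* fields (val, img_size, rect, batch_size, num_workers, weights) come from add_argument calls cut off above. As a self-contained reminder of how argparse maps these dashed flags onto the opt attributes used later:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--conf-thres', type=float, default=0.1)
parser.add_argument('--nms-thres', type=float, default=0.5)
# Dashes in option names become underscores on the parsed namespace.
opt = parser.parse_args(['--conf-thres', '0.3'])
print(opt.conf_thres, opt.nms_thres)  # -> 0.3 0.5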