Example #1
def evaluate(dataset, dataset_root, config, checkpoint):
    # `device` is used below but defined at module level in the original
    # excerpt; define it here so the snippet is self-contained.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    cfg = yaml.safe_load(open(config))['config']
    transform = T.get_transforms(cfg['image-size'], training=False)

    if dataset == 'COCO':
        dataset = ssd.data.COCODetection(root=dataset_root,
                                         classes=cfg['classes'],
                                         transform=transform)
    elif dataset == 'VOC':
        dataset = ssd.data.VOCDetection(root=dataset_root,
                                        classes=cfg['classes'],
                                        transform=transform)
    else:
        dataset = ssd.data.LabelmeDataset(root=dataset_root,
                                          classes=cfg['classes'],
                                          transform=transform)

    print('Generating COCO dataset...', end=' ')
    coco_dataset = get_coco_api_from_dataset(dataset)
    print('done')

    model = ssd.SSD300(cfg)
    model.eval()
    model.load_state_dict(torch.load(checkpoint, map_location=device))
    model.to(device)

    ssd.engine.evaluate(model, dataset, coco_dataset, device)
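
A minimal sketch of how this command might be invoked; every path and file name below is a placeholder, not taken from the original:

# Hypothetical invocation; all paths are placeholders.
evaluate(dataset='VOC',
         dataset_root='data/VOCdevkit',
         config='configs/ssd300.yml',
         checkpoint='checkpoints/ssd300-voc.pt')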
Example #2
def inference(im_path, checkpoint, config, traced, output):

    if not traced:
        # Eager-mode path: build the model from the YAML config and load a
        # plain state-dict checkpoint.
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

        cfg = yaml.safe_load(open(config))['config']
        idx_to_class = cfg['classes']

        model = ssd.SSD300(cfg)
        model.eval()

        checkpoint = torch.load(checkpoint, map_location=device)
        model.load_state_dict(checkpoint)
        model.to(device)
    else:
        # TorchScript path: the traced module carries its class names in an
        # extra archive file, so no YAML config is needed.
        device = torch.device('cpu')
        files = {'classes': ''}
        model = torch.jit.load(checkpoint,
                               map_location=device,
                               _extra_files=files)
        model.to(device)
        model.eval()
        idx_to_class = files['classes'].decode().split(',')

    im = cv2.imread(im_path)
    im_in = T.get_transforms(300, inference=True)(im)
    im_in = im_in.unsqueeze(0).to(device)

    with torch.no_grad():
        detections = model(im_in)

    # Detections are in normalized [0, 1] coordinates; this maps them back
    # to pixel coordinates of the original image (x_min, y_min, x_max, y_max).
    scale = torch.as_tensor([im.shape[1], im.shape[0]] * 2)
    scale.unsqueeze_(0)

    if not traced:
        detections = detections[0]
        true_mask = detections['scores'] > .5
        scores = detections['scores'][true_mask].cpu().tolist()
        boxes = (detections['boxes'][true_mask].cpu() * scale).int().tolist()
        labels = detections['labels'][true_mask].cpu().tolist()
    else:
        boxes, labels, scores = detections
        true_mask = scores[0] > .5
        boxes = (boxes[0][true_mask] * scale).cpu().int().tolist()
        labels = labels[0][true_mask].cpu().tolist()

    # Index 0 is presumably the background class, hence the shift by one.
    names = [idx_to_class[i - 1] for i in labels]

    im = ssd.viz.draw_boxes(im, boxes, names)
    if output is not None:
        cv2.imwrite(output, im)

    cv2.imshow('prediction', im)
    cv2.waitKey(0)
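
For reference, a hedged usage sketch; the file names are placeholders, and `traced` selects between the two loading paths above:

# Hypothetical call; writes the annotated image and shows it in a window.
inference(im_path='samples/dog.jpg',
          checkpoint='checkpoints/ssd300.pt',
          config='configs/ssd300.yml',
          traced=False,
          output='out/dog-pred.jpg')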
Example #3
def _smart_approach(image: Union[str, np.ndarray],
                    characters_size: Optional[ImageSize] = None,
                    return_rectangles: bool = False):
    # `_model`, `_config`, cv2, np, yaml and the `ImageSize` alias are assumed
    # to be imported/defined at module level in the original source.
    global _model, _config

    import torch
    import ssd
    import ssd.transforms as T

    device = torch.device('cpu')
    tfms = T.get_transforms(300, inference=True)

    if isinstance(characters_size, int):
        characters_size = (characters_size, ) * 2

    if isinstance(image, str):
        image = cv2.imread(image)

    # Lazily build and cache the model on the first call.
    if _model is None:
        config = yaml.safe_load(open('ai/config.yml'))['config']

        _model = ssd.SSD300(config)
        _model.eval()

        checkpoint = torch.load('ai/checkpoint.pt', map_location=device)
        _model.load_state_dict(checkpoint)
        _model.to(device)

    im_in = tfms(image)
    im_in = im_in.unsqueeze(0).to(device)

    scale = torch.as_tensor([image.shape[1], image.shape[0]] * 2)
    scale.unsqueeze_(0)

    with torch.no_grad():
        detections = _model(im_in)[0]

    true_mask = detections['scores'] > .5
    boxes = (detections['boxes'][true_mask].cpu() * scale).int().numpy()
    characters = [
        image[y_min:y_max, x_min:x_max] for x_min, y_min, x_max, y_max in boxes
    ]

    if characters_size is not None:
        characters = np.array(
            [cv2.resize(o, characters_size) for o in characters])

    if not return_rectangles:
        return characters
    # `valid_rects` was undefined in the original excerpt; the pixel-space
    # boxes computed above are the natural rectangles to return.
    return characters, boxes
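
A hedged usage sketch of the helper; the image path and size are placeholders:

# Hypothetical call: crop detected characters and resize each to 32x32.
chars, rects = _smart_approach('samples/plate.png',
                               characters_size=32,
                               return_rectangles=True)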
Example #4
class SSD_Last(nn.Module):
    # The class line is implied by the `super()` call in the original
    # excerpt and restored here so the snippet is self-contained.
    def __init__(self):
        super(SSD_Last, self).__init__()
        ori_ssd = ssd.SSD300()
        # Adapter conv: kernel 3 with dilation 2 and padding 2 keeps the
        # spatial size while mapping 256 channels to the 512 expected by conv5.
        self.fit_ssd = nn.Conv2d(256, 512, kernel_size=3, stride=1,
                                 padding=2, dilation=2, bias=False)
        self.norm4 = ori_ssd.norm4
        # Reuse the tail of the pretrained SSD300, grouped stage by stage.
        self.conv5s = nn.Sequential(*[getattr(ori_ssd, conv) for conv in
                                      ['conv5_1', 'conv5_2', 'conv5_3']])
        self.conv6_7 = nn.Sequential(*[getattr(ori_ssd, conv) for conv in
                                       ['conv6', 'conv7']])
        self.conv8s = nn.Sequential(*[getattr(ori_ssd, conv) for conv in
                                      ['conv8_1', 'conv8_2']])
        self.conv9s = nn.Sequential(*[getattr(ori_ssd, conv) for conv in
                                      ['conv9_1', 'conv9_2']])
        self.conv10s = nn.Sequential(*[getattr(ori_ssd, conv) for conv in
                                       ['conv10_1', 'conv10_2']])
        self.conv11s = nn.Sequential(*[getattr(ori_ssd, conv) for conv in
                                       ['conv11_1', 'conv11_2']])
        self.multibox = ori_ssd.multibox
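
The `fit_ssd` adapter deserves a sanity check: with kernel_size=3, dilation=2 and padding=2, the effective kernel size is 2*(3-1)+1 = 5, so H_out = H + 2*2 - 2*(3-1) - 1 + 1 = H and the spatial size is preserved. A standalone sketch (the 38x38 feature size is an assumption, typical for SSD300's conv4_3 stage):

import torch
import torch.nn as nn

# Same hyperparameters as `fit_ssd` above; spatial dims stay unchanged.
conv = nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=2,
                 dilation=2, bias=False)
x = torch.randn(1, 256, 38, 38)
assert conv(x).shape == (1, 512, 38, 38)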
Example #5
def train(dataset, dataset_root, config, basenet, checkpoint, save_dir, epochs,
          batch_size, num_workers, lr, momentum, wd, gamma, step_size, logdir):
    # `device` is used below but comes from module scope in the original
    # excerpt; define it here so the snippet is self-contained.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    now = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
    dataset_root = Path(dataset_root)
    basenet = Path(basenet)
    logdir = (Path(logdir) / now) if logdir is not None else None
    checkpoint = Path(checkpoint) if checkpoint is not None else None

    save_dir = Path(save_dir)
    save_dir.mkdir(exist_ok=True, parents=True)

    cfg = yaml.safe_load(open(config))['config']
    transform = T.get_transforms(cfg['image-size'], training=True)

    if dataset == 'COCO':
        # `viz_dataset` keeps untransformed images for the prediction
        # logging done by `_log_predictions` below.
        viz_dataset = ssd.data.COCODetection(root=dataset_root,
                                             classes=cfg['classes'])
        dataset = ssd.data.COCODetection(root=dataset_root,
                                         classes=cfg['classes'],
                                         transform=transform)
    elif dataset == 'VOC':
        viz_dataset = ssd.data.VOCDetection(root=dataset_root,
                                            classes=cfg['classes'])

        dataset = ssd.data.VOCDetection(root=dataset_root,
                                        classes=cfg['classes'],
                                        transform=transform)
    else:
        viz_dataset = ssd.data.LabelmeDataset(root=dataset_root,
                                              classes=cfg['classes'])
        dataset = ssd.data.LabelmeDataset(root=dataset_root,
                                          classes=cfg['classes'],
                                          transform=transform)

    if logdir is not None:
        tb_writer = SummaryWriter(str(logdir), flush_secs=10)
    else:
        tb_writer = None

    model = ssd.SSD300(cfg)
    if checkpoint is not None:
        print(f'Resuming training, loading {str(checkpoint)}...')
        checkpoint = torch.load(checkpoint)
        model.load_state_dict(checkpoint)
    else:
        vgg_weights = torch.load(basenet)
        print('Loading base network...')
        model.vgg.load_state_dict(vgg_weights)

    model.to(device)
    model.train()

    optimizer = torch.optim.SGD(model.parameters(),
                                lr=lr,
                                momentum=momentum,
                                weight_decay=wd)
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                   step_size=step_size,
                                                   gamma=gamma)
    # The two arguments are presumably the IoU matching threshold (0.5) and
    # the 3:1 hard-negative mining ratio from the SSD paper.
    criterion = ssd.nn.MultiBoxLoss(0.5, 3)

    data_loader = torch.utils.data.DataLoader(
        dataset=dataset,
        batch_size=batch_size,
        num_workers=num_workers,
        shuffle=True,
        collate_fn=ssd.data.detection_collate,
        pin_memory=True)

    steps_per_epoch = len(dataset) // batch_size
    for epoch in range(1, epochs + 1):
        ssd.engine.train_one_epoch(model=model,
                                   optimizer=optimizer,
                                   criterion_fn=criterion,
                                   data_loader=data_loader,
                                   epoch=epoch,
                                   device=device,
                                   tb_writer=tb_writer)

        _log_predictions(model, viz_dataset, epoch, tb_writer)
        lr_scheduler.step()
        model_f = save_dir / f'{dataset.name}_{epoch}.pt'
        torch.save(model.state_dict(), model_f)
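
A hedged sketch of a call with typical SSD hyperparameters; every path and value below is a placeholder, not taken from the original:

# Hypothetical training run; adjust paths and hyperparameters to taste.
train(dataset='VOC', dataset_root='data/VOCdevkit',
      config='configs/ssd300.yml', basenet='weights/vgg16.pt',
      checkpoint=None, save_dir='checkpoints', epochs=120,
      batch_size=32, num_workers=4, lr=1e-3, momentum=0.9, wd=5e-4,
      gamma=0.1, step_size=80, logdir='runs')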
Example #6
import argparse

from chainer import serializers
from chainer.links.caffe import CaffeFunction

import ssd

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('source')
    parser.add_argument('target')
    parser.add_argument('--baseonly', action='store_true')
    parser.set_defaults(baseonly=False)
    args = parser.parse_args()

    # Load the original Caffe weights and a fresh Chainer SSD300 to copy into.
    caffe_model = CaffeFunction(args.source)
    model = ssd.SSD300(n_class=20, n_anchors=(4, 6, 6, 6, 4, 4))

    model.base.conv1_1.copyparams(caffe_model.conv1_1)
    model.base.conv1_2.copyparams(caffe_model.conv1_2)

    model.base.conv2_1.copyparams(caffe_model.conv2_1)
    model.base.conv2_2.copyparams(caffe_model.conv2_2)

    model.base.conv3_1.copyparams(caffe_model.conv3_1)
    model.base.conv3_2.copyparams(caffe_model.conv3_2)
    model.base.conv3_3.copyparams(caffe_model.conv3_3)

    model.base.conv4_1.copyparams(caffe_model.conv4_1)
    model.base.conv4_2.copyparams(caffe_model.conv4_2)
    model.base.conv4_3.copyparams(caffe_model.conv4_3)
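
The excerpt stops after the conv4 block; the remaining layers presumably follow the same copyparams pattern. A plausible closing step, assuming the converted model is meant to be written to args.target, would be:

# Hypothetical closing step: persist the converted weights with Chainer.
serializers.save_npz(args.target, model)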