예제 #1
0
def eval_ssd_r34_mlperf_coco(args):
    """Evaluate an SSD-ResNet34 checkpoint on the COCO val2017 split.

    Builds the default boxes / transformer for ``args.image_size``, loads the
    val2017 annotations and images from ``args.data``, restores the model from
    ``args.checkpoint`` and runs ``coco_eval`` at ``args.threshold``.
    """
    from coco import COCO
    # Only use CUDA when the user allows it AND a GPU is actually available.
    use_cuda = not args.no_cuda and torch.cuda.is_available()
    dboxes = dboxes_R34_coco(args.image_size, args.strides)
    encoder = Encoder(dboxes)
    val_trans = SSDTransformer(dboxes,
                               (args.image_size[0], args.image_size[1]),
                               val=True)

    val_annotate = os.path.join(args.data, "annotations/instances_val2017.json")
    val_coco_root = os.path.join(args.data, "val2017")

    cocoGt = COCO(annotation_file=val_annotate)
    val_coco = COCODetection(val_coco_root, val_annotate, val_trans)
    # Invert the dataset's label map so results are reported with the
    # original COCO category ids.
    inv_map = {v: k for k, v in val_coco.label_map.items()}

    ssd_r34 = SSD_R34(val_coco.labelnum, args.strides)

    print("loading model checkpoint", args.checkpoint)
    # map_location keeps GPU-saved checkpoints loadable on CPU-only hosts.
    od = torch.load(args.checkpoint, map_location=lambda storage, loc: storage)
    # BUG FIX: removed a leftover `import pdb; pdb.set_trace()` breakpoint
    # that halted every evaluation run here.
    ssd_r34.load_state_dict(od["model"])

    if use_cuda:
        ssd_r34.cuda(args.device)
    # NOTE(review): the original also constructed a Loss(dboxes) here, but it
    # was never passed to coco_eval nor used otherwise; dropped as dead code.

    coco_eval(ssd_r34, val_coco, cocoGt, encoder, inv_map, args.threshold,
              args.device, use_cuda)
예제 #2
0
def eval_ssd_r34_mlperf_coco(args):
    """Run COCO evaluation for SSD-ResNet34, or a dummy-data smoke run.

    With ``args.dummy`` set, no dataset is loaded and the model is built with
    the standard 81 COCO classes; otherwise val2017 is loaded from
    ``args.data`` and wrapped in a DataLoader before calling ``coco_eval``.
    """
    from coco import COCO

    # Use the GPU only when one exists and the user did not opt out.
    use_cuda = torch.cuda.is_available() and not args.no_cuda

    dboxes = dboxes_R34_coco(args.image_size, args.strides)
    encoder = Encoder(dboxes)
    image_hw = (args.image_size[0], args.image_size[1])
    val_trans = SSDTransformer(dboxes, image_hw, val=True)

    if args.dummy:
        # Dummy mode: no real data; evaluation helpers stay unset and the
        # model falls back to the standard 81 COCO classes.
        cocoGt = None
        encoder = None
        inv_map = None
        val_dataloader = None
        labelnum = 81
    else:
        val_annotate = os.path.join(args.data,
                                    "annotations/instances_val2017.json")
        val_coco_root = os.path.join(args.data, "val2017")

        cocoGt = COCO(annotation_file=val_annotate)
        val_coco = COCODetection(val_coco_root, val_annotate, val_trans)
        # Map contiguous training labels back to original COCO category ids.
        inv_map = {v: k for k, v in val_coco.label_map.items()}

        loader_kwargs = dict(batch_size=args.batch_size,
                             shuffle=False,
                             sampler=None,
                             num_workers=args.workers)
        if not args.accuracy_mode:
            # Throughput runs drop the ragged final batch.
            loader_kwargs["drop_last"] = True
        val_dataloader = DataLoader(val_coco, **loader_kwargs)
        labelnum = val_coco.labelnum

    ssd_r34 = SSD_R34(labelnum, strides=args.strides)

    if args.checkpoint:
        print("loading model checkpoint", args.checkpoint)
        od = torch.load(args.checkpoint,
                        map_location=lambda storage, loc: storage)
        ssd_r34.load_state_dict(od["model"])

    if use_cuda:
        ssd_r34.cuda(args.device)
    coco_eval(ssd_r34, val_dataloader, cocoGt, encoder, inv_map, args)
예제 #3
0
def dboxes_coco(figsize, strides):
    """Build the SSD default (anchor) boxes for a COCO model on the GPU.

    A single forward pass of a throwaway SSD-ResNet34 with
    ``extract_shapes=True`` reports the feature-map sizes, from which the
    per-level steps and scales are derived.
    """
    probe_net = SSD_R34(81, strides=strides).to('cuda')
    probe_img = torch.rand([1, 3] + figsize).to('cuda')
    _, _, feat_size = probe_net(probe_img, extract_shapes=True)

    steps = []
    for fs in feat_size:
        steps.append((int(figsize[0] / fs[0]), int(figsize[1] / fs[1])))

    # Baseline scales are defined at 300px; rescale to the requested size.
    base_scales = [21, 45, 99, 153, 207, 261, 315]
    scales = [(int(s * figsize[0] / 300), int(s * figsize[1] / 300))
              for s in base_scales]
    aspect_ratios = [[2], [2, 3], [2, 3], [2, 3], [2], [2]]

    dboxes = DefaultBoxes(figsize, feat_size, steps, scales, aspect_ratios)
    print('Total number of anchors is: ', dboxes.dboxes.shape[0])
    return dboxes
예제 #4
0
def dboxes_R34_coco(figsize, strides):
    """Build the SSD-ResNet34 default (anchor) boxes for COCO (CPU probe).

    Runs one synthetic forward pass with ``extract_shapes=True`` to discover
    the feature-map sizes, then derives the per-level steps and scales.
    """
    probe_net = SSD_R34(81, strides=strides)
    probe_img = torch.rand([1, 3] + figsize)
    _, _, feat_size = probe_net(probe_img, extract_shapes=True)

    steps = []
    for fs in feat_size:
        steps.append((int(figsize[0] / fs[0]), int(figsize[1] / fs[1])))

    # use the scales here: https://github.com/amdegroot/ssd.pytorch/blob/master/data/config.py
    base_scales = [21, 45, 99, 153, 207, 261, 315]
    scales = [(int(s * figsize[0] / 300), int(s * figsize[1] / 300))
              for s in base_scales]
    aspect_ratios = [[2], [2, 3], [2, 3], [2, 3], [2], [2]]

    return DefaultBoxes(figsize, feat_size, steps, scales, aspect_ratios)
예제 #5
0
def dboxes_R34_coco(figsize, strides):
    """Build the SSD-ResNet34 default (anchor) boxes for COCO.

    A single forward pass with ``extract_shapes=True`` on a synthetic image
    reports the feature-map sizes, from which per-level steps and scales are
    derived.
    """
    ssd_r34 = SSD_R34(81, strides=strides)
    synt_img = torch.rand([1, 3] + figsize)
    # NOTE(review): removed commented-out dead code (a typo'd CUDA-move block
    # and a disabled pdb breakpoint) and two debug prints; the function now
    # matches the clean variant of this helper used elsewhere in the project.
    _, _, feat_size = ssd_r34(synt_img, extract_shapes=True)
    steps = [(int(figsize[0] / fs[0]), int(figsize[1] / fs[1]))
             for fs in feat_size]
    # use the scales here: https://github.com/amdegroot/ssd.pytorch/blob/master/data/config.py
    scales = [(int(s * figsize[0] / 300), int(s * figsize[1] / 300))
              for s in [21, 45, 99, 153, 207, 261, 315]]
    aspect_ratios = [[2], [2, 3], [2, 3], [2, 3], [2], [2]]
    dboxes = DefaultBoxes(figsize, feat_size, steps, scales, aspect_ratios)
    return dboxes
예제 #6
0
def train_mlperf_coco(args):
    """Train SSD-ResNet34 on COCO train2017 with periodic val2017 evaluation.

    Uses a fixed MLPerf-style step LR schedule (decay at iterations 160k and
    200k), saves checkpoints at the iterations listed in ``args.evaluation``
    (unless ``args.no_save``), and returns early once ``coco_eval`` signals
    the accuracy target was reached.
    """
    from coco import COCO
    # Check that GPUs are actually available
    use_cuda = not args.no_cuda and torch.cuda.is_available()
    # BUG FIX: the original computed use_cuda but then moved the model and
    # every batch to 'cuda' unconditionally, crashing on CPU-only hosts.
    # Route all placement through a single device string instead.
    device = 'cuda' if use_cuda else 'cpu'

    dboxes = dboxes_coco(args.image_size, args.strides)
    encoder = Encoder(dboxes)
    train_trans = SSDTransformer(dboxes, tuple(args.image_size), val=False)
    val_trans = SSDTransformer(dboxes, tuple(args.image_size), val=True)

    val_annotate = os.path.join(args.data,
                                "annotations/instances_val2017.json")
    val_coco_root = os.path.join(args.data, "val2017")
    train_annotate = os.path.join(args.data,
                                  "annotations/instances_train2017.json")
    train_coco_root = os.path.join(args.data, "train2017")

    cocoGt = COCO(annotation_file=val_annotate)
    val_coco = COCODetection(val_coco_root, val_annotate, val_trans)
    train_coco = COCODetection(train_coco_root, train_annotate, train_trans)

    train_dataloader = DataLoader(train_coco,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  num_workers=4)

    # The class count comes from the dataset (the original also built a
    # throwaway SSD_R34(81, ...) earlier that was immediately discarded).
    ssd_r34 = SSD_R34(train_coco.labelnum, strides=args.strides)
    if args.checkpoint is not None:
        print("loading model checkpoint", args.checkpoint)
        od = torch.load(args.checkpoint)
        ssd_r34.load_state_dict(od["model"])
    ssd_r34.train()
    ssd_r34.to(device)
    if use_cuda and args.device_ids and len(args.device_ids) > 1:
        ssd_r34 = nn.DataParallel(ssd_r34, args.device_ids)

    loss_func = Loss(dboxes)
    if use_cuda:
        loss_func.to(device)
        loss_func = nn.DataParallel(loss_func, args.device_ids)

    optim = torch.optim.SGD(ssd_r34.parameters(),
                            lr=1e-3,
                            momentum=0.9,
                            weight_decay=5e-4)
    print("epoch", "nbatch", "loss")

    iter_num = args.iteration
    avg_loss = 0.0
    # Rolling window of the 10 most recent batch losses.
    last_loss = [0.0] * 10
    # Map contiguous training labels back to original COCO category ids.
    inv_map = {v: k for k, v in val_coco.label_map.items()}

    for epoch in range(args.epochs):

        for nbatch, (img, img_size, bbox,
                     label) in enumerate(train_dataloader):

            # Fixed step LR schedule keyed on global iteration count.
            if iter_num == 160000:
                print("")
                print("lr decay step #1")
                for param_group in optim.param_groups:
                    param_group['lr'] = 1e-4

            if iter_num == 200000:
                print("")
                print("lr decay step #2")
                for param_group in optim.param_groups:
                    param_group['lr'] = 1e-5

            img = Variable(img, requires_grad=True)
            ploc, plabel, _ = ssd_r34(img.to(device))
            trans_bbox = bbox.transpose(1, 2).contiguous()

            gloc, glabel = Variable(trans_bbox, requires_grad=False), \
                           Variable(label, requires_grad=False)

            loss = loss_func(ploc, plabel, gloc, glabel).mean()

            # Guard the exponential running average against inf losses that
            # can occur early in training.
            if not np.isinf(loss.item()):
                avg_loss = 0.999 * avg_loss + 0.001 * loss.item()
            last_loss.pop()
            last_loss = [loss.item()] + last_loss
            avg_last_loss = sum(last_loss) / len(last_loss)
            print("Iteration: {:6d}, Loss function: {:5.3f}, Average Loss: {:.3f}, Average Last 10 Loss: {:.3f}"\
                        .format(iter_num, loss.item(), avg_loss,avg_last_loss), end="\r")
            optim.zero_grad()
            loss.backward()
            optim.step()

            # Drop the reference so the graph is freed promptly.
            loss = None

            if iter_num in args.evaluation:
                if not args.no_save:
                    print("")
                    print("saving model...")
                    # Unwrap DataParallel so the checkpoint loads into a
                    # plain (single-device) model.
                    module = ssd_r34.module if len(
                        args.device_ids) > 1 else ssd_r34
                    torch.save(
                        {
                            "model": module.state_dict(),
                            "label_map": train_coco.label_info
                        }, args.save_path + "/iter_{}.pt".format(iter_num))
                if coco_eval(ssd_r34, val_coco, cocoGt, encoder, inv_map,
                             args.threshold, args.device_ids):
                    return

            iter_num += 1
예제 #7
0
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.


import torch
import numpy as np



# from ssd_mobilenet_v1 import create_mobilenetv1_ssd
from ssd_r34 import SSD_R34


# Smoke test: build the detector once and run a single inference on a
# random 1200x1200 image.
model = SSD_R34()
model.eval()


image = torch.rand(1, 3, 1200, 1200)


# Inference only: no_grad avoids building the autograd graph, cutting
# memory use for this forward-only run.
with torch.no_grad():
    results = model(image)