Ejemplo n.º 1
0
def gluon2torch(name,
                gluon_path,
                torch_path,
                base=False,
                reorder=False,
                force_pair=None):
    """Copy pretrained Gluon weights into a PyTorch state dict and save it.

    Keys are paired positionally (gluon key i -> torch key i), so both key
    lists must enumerate parameters in the same order.

    Parameters
    ----------
    name : str
        Model name understood by both model zoos (lower-cased internally).
    gluon_path : str
        Root directory holding the pretrained Gluon parameters.
    torch_path : str
        Root directory of the PyTorch model; the converted weights are
        written here as ``<name>.pth``.
    base : bool
        If True, instantiate both models with ``pretrained_base=False``.
    reorder : bool
        If True, move batch-norm running-statistics keys to the end of both
        key lists so positional zipping pairs them correctly.
    force_pair : tuple of (str, str), optional
        ``(gluon_prefix, torch_prefix)``; keys starting with the respective
        prefix are additionally pushed behind their peers before pairing.
        Only honoured when ``reorder`` is True.
    """
    name = name.lower()
    if base:
        torch_model = model_zoo.get_model(name,
                                          pretrained=False,
                                          pretrained_base=False,
                                          root=torch_path)
        gluon_model = get_model(name,
                                pretrained=True,
                                pretrained_base=False,
                                root=gluon_path)
    else:
        torch_model = model_zoo.get_model(name,
                                          pretrained=False,
                                          root=torch_path)
        gluon_model = get_model(name, pretrained=True, root=gluon_path)
    # PyTorch BN layers carry 'num_batches_tracked' buffers that have no
    # Gluon counterpart; drop them before pairing.
    torch_keys = [
        k for k in torch_model.state_dict().keys()
        if not k.endswith('num_batches_tracked')
    ]
    gluon_keys = list(gluon_model.collect_params().keys())
    if reorder:
        stat_words = ('running_mean', 'running_var', 'moving_mean',
                      'moving_var')

        def _reorder(keys, prefix):
            # Stable 4-way partition: non-stat keys first, stat keys last;
            # within each half, keys matching ``prefix`` go last. With
            # prefix=None this degenerates to non-stat + stat.
            buckets = ([], [], [], [])
            for k in keys:
                idx = 2 * k.endswith(stat_words)
                if prefix is not None and k.startswith(prefix):
                    idx += 1
                buckets[idx].append(k)
            return buckets[0] + buckets[1] + buckets[2] + buckets[3]

        torch_prefix = force_pair[1] if force_pair is not None else None
        gluon_prefix = force_pair[0] if force_pair is not None else None
        torch_keys = _reorder(torch_keys, torch_prefix)
        gluon_keys = _reorder(gluon_keys, gluon_prefix)

    assert len(torch_keys) == len(gluon_keys)
    # Renamed from ``map`` to avoid shadowing the builtin.
    key_map = dict(zip(gluon_keys, torch_keys))
    pytorch_model_params = {}
    print('Convert Gluon Model to PyTorch Model ...')
    for key, value in gluon_model.collect_params().items():
        tensor = torch.from_numpy(value.data().asnumpy())
        # Bug fix: the original set ``tensor.require_grad`` (a typo), which
        # silently created an unused attribute; the real flag is
        # ``requires_grad``.
        tensor.requires_grad = True
        pytorch_model_params[key_map[key]] = tensor
    torch.save(pytorch_model_params, os.path.join(torch_path, name + '.pth'))
    print('Finish')
Ejemplo n.º 2
0
def parse_network(network, outputs, pretrained, **kwargs):
    """Parse network with specified outputs and other arguments.

    Parameters
    ----------
    network : str or nn.Module
        Logic chain: load from gluoncv.model_zoo if network is string.
        Convert to Symbol if network is HybridBlock
    outputs : str or iterable of str
        The name of layers to be extracted as features.
    pretrained : bool
        Use pretrained parameters as in model_zoo
    
    Returns
    -------
    results: list of nn.Module (the same size as len(outputs))

    """
    # l: number of output groups; n: length of each index path.
    # NOTE(review): ``outputs`` is indexed like a list of integer index
    # paths (e.g. [[3, 1], [4, 2]]), not layer names as the docstring says
    # — confirm against callers.
    l, n = len(outputs), len(outputs[0])
    results = [[] for _ in range(l)]
    if isinstance(network, str):
        from model.model_zoo import get_model
        network = get_model(network, pretrained=pretrained, **kwargs).features

    # helper func
    def recursive(pos, block, arr, j):
        # Walk the index path ``arr`` one level deeper into ``block``:
        # children before arr[j] stay with group ``pos``, children after
        # arr[j] seed the next group, and recursion descends into arr[j].
        if j == n:
            results[pos].append([block])
            return
        child = list(block.children())
        results[pos].append(child[:arr[j]])
        if pos + 1 < l: results[pos + 1].append(child[arr[j] + 1:])
        recursive(pos, child[arr[j]], arr, j + 1)

    block = list(network.children())

    for i in range(l):
        pos = outputs[i][0]
        if i == 0:
            results[i].append(block[:pos])
        elif i < l:  # always true for i > 0; kept for symmetry with i == 0
            results[i].append(block[outputs[i - 1][0] + 1:pos])
        recursive(i, block[pos], outputs[i], 1)

    for i in range(l):
        # Flatten each group's collected sub-lists into one nn.Sequential.
        # NOTE(review): ``if sub`` tests the sub-list (skips empty slices),
        # not the item — presumably intentional; verify.
        results[i] = nn.Sequential(
            *[item for sub in results[i] for item in sub if sub])
    return results
Ejemplo n.º 3
0
    def __init__(self,
                 base_name='resnet50_v1b',
                 pretrained_base=False,
                 num_joints=17,
                 num_deconv_layers=3,
                 num_deconv_filters=(2048, 256, 256, 256),
                 num_deconv_kernels=(4, 4, 4),
                 final_conv_kernel=1,
                 deconv_with_bias=False,
                 **kwargs):
        """Pose-estimation network: backbone features, a stack of deconv
        (upsampling) layers, and a 1x1/3x3 conv producing one heatmap per
        joint.

        Parameters
        ----------
        base_name : str
            Backbone model name passed to ``get_model``.
        pretrained_base : bool
            Load pretrained backbone weights.
        num_joints : int
            Number of keypoint heatmaps emitted by the final conv.
        num_deconv_layers : int
            Number of deconv stages built by ``_make_deconv_layer``.
        num_deconv_filters : tuple of int
            Channel widths for the deconv stack (first entry is the
            backbone output width).
        num_deconv_kernels : tuple of int
            Kernel size per deconv stage.
        final_conv_kernel : int
            Kernel size of the prediction conv (padding 1 when 3, else 0).
        deconv_with_bias : bool
            Whether deconv layers use a bias term.
        """
        super(SimplePoseResNet, self).__init__(**kwargs)

        from model.model_zoo import get_model
        base_network = get_model(base_name, pretrained=pretrained_base)

        # Wrap the backbone's feature extractor; only the 'features'
        # attribute is used (the classifier head is dropped).
        self.resnet = list()
        for layer in ['features']:
            self.resnet.append(getattr(base_network, layer))
        # if base_name.endswith('v1'):
        #     for layer in ['features']:
        #         self.resnet.append(getattr(base_network, layer))
        # else:
        #     for layer in ['conv1', 'bn1', 'relu', 'maxpool',
        #                   'layer1', 'layer2', 'layer3', 'layer4']:
        #         self.resnet.append(getattr(base_network, layer))
        self.resnet = nn.Sequential(*self.resnet)

        self.deconv_with_bias = deconv_with_bias

        # used for deconv layers
        self.deconv_layers = self._make_deconv_layer(
            num_deconv_layers,
            num_deconv_filters,
            num_deconv_kernels,
        )

        self.final_layer = nn.Conv2d(
            num_deconv_filters[-1],
            num_joints,
            kernel_size=final_conv_kernel,
            stride=1,
            # 'same' padding: 1 for a 3x3 kernel, 0 for 1x1.
            padding=1 if final_conv_kernel == 3 else 0,
        )
Ejemplo n.º 4
0
def train(cfg, local_rank, distributed):
    """Build the model, optimizer, scheduler and dataloader from ``cfg``,
    restore any checkpoint, run the training loop, and return the model.

    Parameters
    ----------
    cfg : config node
        Provides MODEL/TRAIN/CONFIG sections used below.
    local_rank : int
        Device index for DistributedDataParallel.
    distributed : bool
        Whether to run with SyncBatchNorm + DDP.
    """
    backbone_weights = os.path.join(cfg.TRAIN.model_root,
                                    cfg.TRAIN.backbone + '.pth')
    model = get_model(cfg.MODEL.model, pretrained_base=backbone_weights)
    device = torch.device(cfg.MODEL.device)
    model.to(device)
    if distributed:
        # Share BN statistics across processes before wrapping in DDP.
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)

    optimizer = make_optimizer(cfg, model)
    scheduler = make_lr_scheduler(cfg, optimizer)

    if distributed:
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[local_rank], output_device=local_rank)

    # Mutable training state; checkpoint loading may advance "iteration".
    state = {"iteration": 0}

    # Only rank 0 writes checkpoints to disk.
    checkpointer = ptutil.CheckPointer(model, optimizer, scheduler,
                                       cfg.CONFIG.output_dir,
                                       ptutil.get_rank() == 0)
    state.update(checkpointer.load(cfg.TRAIN.weight))

    data_loader = build_dataloader(cfg,
                                   train=True,
                                   distributed=distributed,
                                   start_iter=state["iteration"])

    training(model, data_loader, optimizer, scheduler, checkpointer, device,
             cfg.TRAIN.checkpoint_period, state)

    return model
Ejemplo n.º 5
0
def gluon2torch(name, gluon_path, torch_path, num):
    """Convert the first ``num`` parameters of a pretrained Gluon model to
    PyTorch tensors; later parameters keep the fresh PyTorch model's own
    values (partial weight transfer). Saves the result as ``<name>.pth``.

    Parameters
    ----------
    name : str
        Model name known to both model zoos (lower-cased internally).
    gluon_path : str
        Root directory of the pretrained Gluon parameters.
    torch_path : str
        Root directory of the PyTorch model; output is written here.
    num : int
        Number of leading parameters to copy from the Gluon model.
    """
    name = name.lower()
    torch_model = model_zoo.get_model(name, pretrained=False, root=torch_path)
    gluon_model = get_model(name, pretrained=True, root=gluon_path)
    # BN 'num_batches_tracked' buffers have no Gluon counterpart.
    torch_keys = [k for k in torch_model.state_dict().keys()
                  if not k.endswith('num_batches_tracked')]
    gluon_keys = gluon_model.collect_params().keys()
    assert len(torch_keys) == len(gluon_keys)

    # Positional pairing; renamed from ``map`` to avoid shadowing the builtin.
    key_map = dict(zip(gluon_keys, torch_keys))
    pytorch_model_params = {}
    print('Convert Gluon Model to PyTorch Model ...')
    # NOTE(review): this zip pairs gluon params with the raw torch
    # state_dict, which still contains 'num_batches_tracked' entries —
    # (key, value2) alignment past a BN layer looks suspect; verify.
    for i, ((key, value), (key2, value2)) in enumerate(
            zip(gluon_model.collect_params().items(),
                torch_model.state_dict().items())):
        if i < num:
            tensor = torch.from_numpy(value.data().asnumpy())
            # Bug fix: original wrote ``require_grad`` (typo, silent no-op);
            # the real autograd flag is ``requires_grad``.
            tensor.requires_grad = True
            pytorch_model_params[key_map[key]] = tensor
        else:
            pytorch_model_params[key_map[key]] = value2

    torch.save(pytorch_model_params, os.path.join(torch_path, name + '.pth'))
    print('Finish')
Ejemplo n.º 6
0
                        default=os.path.expanduser('~/.torch/models'),
                        help='Default pre-trained mdoel root.')
    opt = parser.parse_args()
    return opt


if __name__ == '__main__':
    opt = parse_args()
    device = torch.device('cpu')
    if opt.cuda:
        device = torch.device('cuda:0')
    # Load Model
    model_name = opt.model
    # Use zoo weights only when no local parameter file was supplied.
    pretrained = True if opt.saved_params == '' else False
    model = get_model(model_name,
                      pretrained=pretrained,
                      pretrained_base=False,
                      root=opt.root).to(device)
    model.eval()

    # Load Images
    if opt.input_pic is None:
        # Pick a bundled demo image keyed by the dataset suffix of the
        # model name (e.g. '..._voc' -> voc_example.jpg).
        img_map = {
            'voc': 'voc_example.jpg',
            'ade': 'ade_example.jpg',
            'coco': 'voc_example.jpg',
            'citys': 'city_example.jpg'
        }
        opt.input_pic = os.path.join(
            cur_path, '../png/' + img_map[model_name.split('_')[-1]])
    img = Image.open(opt.input_pic)
Ejemplo n.º 7
0
        # benchmark autotuning changes results slightly; disable it for the
        # exact 'testval' scoring mode.
        torch.backends.cudnn.benchmark = False if args.mode == 'testval' else True
        device = torch.device('cuda')
    else:
        distributed = False

    if distributed:
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(backend="nccl",
                                             init_method=args.init_method)

    # Load Model
    model = model_zoo.get_model(args.model_name,
                                pretrained=True,
                                pretrained_base=False,
                                base_size=args.base_size,
                                crop_size=args.crop_size,
                                root=args.root,
                                aux=args.aux,
                                dilated=args.dilated,
                                jpu=args.jpu)
    # NOTE(review): keep_shape presumably preserves the input resolution for
    # exact validation scoring — confirm in the model implementation.
    model.keep_shape = True if args.mode == 'testval' else False
    model.to(device)

    # testing data
    # ImageNet channel mean/std normalization.
    input_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([.485, .456, .406], [.229, .224, .225]),
    ])

    data_kwargs = {
        'base_size': args.base_size,
Ejemplo n.º 8
0
        device = torch.device('cuda')
    else:
        distributed = False

    if distributed:
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(backend="nccl",
                                             init_method=args.init_method)

    # Load Model
    model_name = args.network
    # Accept several spellings of a truthy flag from the CLI.
    if args.pretrained.lower() in ['true', '1', 'yes', 't']:
        pretrained = True
    else:
        pretrained = False
    # 10 classes — presumably CIFAR-10; confirm against the dataloader.
    kwargs = {'classes': 10, 'pretrained': pretrained, 'root': args.root}
    net = model_zoo.get_model(model_name, **kwargs)
    net.to(device)

    # testing data
    val_metric = Accuracy()
    val_data = get_dataloader(args.batch_size, args.num_workers,
                              args.data_root, distributed)

    # testing
    # Each process validates its shard; metrics are then reduced to rank 0.
    metric = validate(net, val_data, device, val_metric)
    synchronize()
    name, value = accumulate_metric(metric)
    if is_main_process():
        print(name, value)
Ejemplo n.º 9
0
    distributed = num_gpus > 1
    if args.cuda and torch.cuda.is_available():
        cudnn.benchmark = True
        device = torch.device('cuda')
    else:
        # No CUDA: fall back to single-process evaluation.
        distributed = False

    if distributed:
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(backend="nccl", init_method=args.init_method)

    # network
    # Zoo name, e.g. '<algorithm>_<network>_<dataset>'.
    net_name = '_'.join((args.algorithm, args.network, args.dataset))
    args.save_prefix += net_name
    if args.pretrained.lower() in ['true', '1', 'yes', 't']:
        net = model_zoo.get_model(net_name, pretrained=True)
    else:
        # Otherwise --pretrained is treated as a path to saved parameters.
        net = model_zoo.get_model(net_name, pretrained=False)
        net.load_parameters(args.pretrained.strip())

    net.to(device)
    net.set_nms(nms_thresh=0.45, nms_topk=400)

    # testing data
    val_dataset, val_metric = get_dataset(args.dataset, args.data_shape)
    val_data = get_dataloader(val_dataset, args.batch_size, args.num_workers, distributed, args.dataset == 'coco')
    classes = val_dataset.classes  # class names

    # testing
    val_metric = validate(net, val_data, device, val_metric, args.dataset == 'coco')
    synchronize()
Ejemplo n.º 10
0
                        help='demo with GPU')
    parser.add_argument('--root', type=str, default=os.path.expanduser('~/.torch/models'),
                        help='Default pre-trained mdoel root.')
    parser.add_argument('--pretrained', type=str, default='True',
                        help='Load weights from previously saved parameters.')
    args = parser.parse_args()
    return args


if __name__ == '__main__':
    args = parse_args()
    device = torch.device('cpu')
    if args.cuda:
        device = torch.device('cuda')
    image = args.images
    net = get_model(args.network, pretrained=True)
    net.to(device)
    # NMS: overlap threshold 0.3, keep at most 200 detections.
    net.set_nms(0.3, 200)
    net.eval()

    ax = None
    x, img = load_test(image, short=net.short, max_size=net.max_size)
    x = x.to(device)
    with torch.no_grad():
        ids, scores, bboxes, masks = [xx.cpu().numpy() for xx in net(x)]
    # Paste instance masks back into full-image coordinates, then draw
    # masks and boxes on top of the original image.
    masks = expand_mask(masks, bboxes, (img.shape[1], img.shape[0]), scores)
    img = plot_mask(img, masks)
    fig = plt.figure(figsize=(15, 15))
    ax = fig.add_subplot(1, 1, 1)
    ax = plot_bbox(img, bboxes, scores, ids,
                   class_names=net.classes, ax=ax)
Ejemplo n.º 11
0
import time
import numpy as np
import mxnet as mx
from gluoncv.utils import viz
from mxnet import gpu
from mxnet.ndarray import concat
from model import model_zoo
from os.path import basename
import gluoncv as gcv

# Single-class text detector demo using a custom TextBoxes-style SSD.
classes = ['text']
imagePath = ''
netName = 'textboxes_512_mobilenet1.0_custom'
path_to_model = 'textBoxes_512_mobilenet1.0_text_ICDAR_new.params'
gpu_ind = 0
output_path = ''

# Build the detector with the custom class list and load fine-tuned weights.
net, _ = model_zoo.get_model(netName, classes=classes, pretrained_base=False)
net.reset_class(classes)
net.load_parameters(path_to_model)

start = time.time()
# NOTE(review): ``scale`` is never defined in this script and raises
# NameError; it is presumably the short-side resize (e.g. 512) — confirm.
x, image = gcv.data.transforms.presets.ssd.load_test(imagePath,
                                                     scale,
                                                     max_size=2048)
# Bug fix: the original called ``net(y)`` but no ``y`` exists anywhere;
# the preprocessed input tensor returned by load_test is ``x``.
cid, score, bbox = net(x)
ax = viz.plot_bbox(image, bbox[0], score[0], cid[0], class_names=classes)
base_image_name = basename(imagePath)
# NOTE(review): ``plt`` (matplotlib.pyplot) is never imported here — add
# the import before running.
plt.savefig(output_path + base_image_name)
print(time.time() - start)
Ejemplo n.º 12
0
    if distributed:
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(backend="nccl", init_method=args.init_method)

    # network
    kwargs = {}
    module_list = []
    if args.use_fpn:
        module_list.append('fpn')
    if args.norm_layer is not None:
        module_list.append(args.norm_layer)
    # Zoo name, e.g. 'faster_rcnn_fpn_<network>_<dataset>'.
    net_name = '_'.join(('faster_rcnn', *module_list, args.network, args.dataset))
    args.save_prefix += net_name
    if args.pretrained.lower() in ['true', '1', 'yes', 't']:
        net = model_zoo.get_model(net_name, pretrained=True, root=args.root, **kwargs)
    else:
        net = model_zoo.get_model(net_name, pretrained=False, **kwargs)
        # NOTE(review): load_state_dict normally takes a state dict, not a
        # path string — confirm this model overrides it to accept a path.
        net.load_state_dict(args.pretrained.strip())

    net.to(device)

    # testing data
    val_dataset, val_metric = get_dataset(net.short, net.max_size, args.dataset)
    val_data = get_dataloader(val_dataset, args.batch_size, args.num_workers,
                              distributed, args.dataset == 'coco')
    classes = val_dataset.classes  # class names

    # testing
    val_metric = validate(net, val_data, device, val_metric, args.dataset == 'coco')
    synchronize()
Ejemplo n.º 13
0
    if args.cuda and torch.cuda.is_available():
        cudnn.benchmark = True
        device = torch.device('cuda')
    else:
        distributed = False

    if distributed:
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(backend="nccl",
                                             init_method=args.init_method)

    # e.g. '256,192' -> [256, 192]
    input_size = [int(i) for i in args.input_size.split(',')]
    val_list = get_dataloader(args.data_dir, args.batch_size, args.num_workers,
                              input_size, args.mean, args.std, distributed)
    val_metric = COCOKeyPointsMetric(val_list[0],
                                     'coco_keypoints',
                                     data_shape=tuple(input_size),
                                     in_vis_thresh=args.score_threshold,
                                     cleanup=True)
    # Use zoo weights only when no parameter file was supplied on the CLI.
    use_pretrained = True if not args.params_file else False
    net = model_zoo.get_model(args.model,
                              num_joints=args.num_joints,
                              pretrained=use_pretrained).to(device)
    net.eval()

    # Validate (optionally with horizontal-flip test-time augmentation),
    # then reduce metrics across processes and report from rank 0.
    val_metric = validate(val_list, net, val_metric, device, args.flip_test)
    synchronize()
    name, value = accumulate_metric(val_metric)
    if is_main_process():
        print(name, value)
Ejemplo n.º 14
0
    def __init__(self, args):
        """Segmentation trainer: builds the data pipeline, network, loss,
        optimizer, LR schedule and metric from the parsed CLI ``args``."""
        self.device = torch.device(args.device)
        self.save_prefix = '_'.join((args.model, args.backbone, args.dataset))
        # image transform
        # ImageNet channel mean/std normalization.
        input_transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize([.485, .456, .406], [.229, .224, .225]),
        ])
        # dataset and dataloader
        data_kwargs = {
            'transform': input_transform,
            'base_size': args.base_size,
            'crop_size': args.crop_size
        }
        trainset = get_segmentation_dataset(args.dataset,
                                            split=args.train_split,
                                            mode='train',
                                            **data_kwargs)
        # Convert epoch counts to iteration counts for the iteration-based
        # sampler and the poly LR schedule.
        args.per_iter = len(trainset) // (args.num_gpus * args.batch_size)
        args.max_iter = args.epochs * args.per_iter
        if args.distributed:
            sampler = data.DistributedSampler(trainset)
        else:
            sampler = data.RandomSampler(trainset)
        train_sampler = data.sampler.BatchSampler(sampler, args.batch_size,
                                                  True)
        train_sampler = IterationBasedBatchSampler(
            train_sampler, num_iterations=args.max_iter)
        self.train_loader = data.DataLoader(trainset,
                                            batch_sampler=train_sampler,
                                            pin_memory=True,
                                            num_workers=args.workers)
        # Build a validation loader unless evaluation is skipped entirely.
        if not args.skip_eval or 0 < args.eval_epochs < args.epochs:
            valset = get_segmentation_dataset(args.dataset,
                                              split='val',
                                              mode='val',
                                              **data_kwargs)
            val_sampler = make_data_sampler(valset, False, args.distributed)
            val_batch_sampler = data.sampler.BatchSampler(
                val_sampler, args.test_batch_size, False)
            self.valid_loader = data.DataLoader(
                valset,
                batch_sampler=val_batch_sampler,
                num_workers=args.workers,
                pin_memory=True)

        # create network
        if args.model_zoo is not None:
            self.net = get_model(args.model_zoo, pretrained=True)
        else:
            kwargs = {'oc': args.oc} if args.model == 'ocnet' else {}
            self.net = get_segmentation_model(model=args.model,
                                              dataset=args.dataset,
                                              backbone=args.backbone,
                                              aux=args.aux,
                                              dilated=args.dilated,
                                              jpu=args.jpu,
                                              crop_size=args.crop_size,
                                              **kwargs)
        if args.distributed:
            self.net = torch.nn.SyncBatchNorm.convert_sync_batchnorm(self.net)
        self.net.to(self.device)
        # resume checkpoint if needed
        if args.resume is not None:
            if os.path.isfile(args.resume):
                self.net.load_state_dict(torch.load(args.resume))
            else:
                raise RuntimeError("=> no checkpoint found at '{}'".format(
                    args.resume))

        # create criterion
        if args.ohem:
            # Online hard example mining: keep at least 1/16 of the batch's
            # pixels as hard examples.
            min_kept = args.batch_size * args.crop_size**2 // 16
            self.criterion = OHEMSoftmaxCrossEntropyLoss(thresh=0.7,
                                                         min_kept=min_kept,
                                                         use_weight=False)
        else:
            self.criterion = MixSoftmaxCrossEntropyLoss(
                args.aux, aux_weight=args.aux_weight)

        # optimizer and lr scheduling
        # Backbone stages train at the base LR; extra heads ('others', JPU)
        # train at 10x the base LR.
        params_list = [{
            'params': self.net.base1.parameters(),
            'lr': args.lr
        }, {
            'params': self.net.base2.parameters(),
            'lr': args.lr
        }, {
            'params': self.net.base3.parameters(),
            'lr': args.lr
        }]
        if hasattr(self.net, 'others'):
            for name in self.net.others:
                params_list.append({
                    'params':
                    getattr(self.net, name).parameters(),
                    'lr':
                    args.lr * 10
                })
        if hasattr(self.net, 'JPU'):
            params_list.append({
                'params': self.net.JPU.parameters(),
                'lr': args.lr * 10
            })
        self.optimizer = optim.SGD(params_list,
                                   lr=args.lr,
                                   momentum=args.momentum,
                                   weight_decay=args.weight_decay)
        self.scheduler = WarmupPolyLR(self.optimizer,
                                      T_max=args.max_iter,
                                      warmup_factor=args.warmup_factor,
                                      warmup_iters=args.warmup_iters,
                                      power=0.9)

        if args.distributed:
            self.net = torch.nn.parallel.DistributedDataParallel(
                self.net,
                device_ids=[args.local_rank],
                output_device=args.local_rank)

        # evaluation metrics
        self.metric = SegmentationMetric(trainset.num_class)
        self.args = args
Ejemplo n.º 15
0
                        help='demo with GPU')
    parser.add_argument('--input-pic', type=str, default=os.path.join(cur_path, '../png/mt_baker.jpg'),
                        help='path to the input picture')
    opt = parser.parse_args()
    return opt


if __name__ == '__main__':
    opt = parse_args()
    device = torch.device('cpu')
    if opt.cuda:
        device = torch.device('cuda:0')
    # Load Model
    model_name = opt.model
    # Use zoo weights only when no local parameter file was supplied.
    pretrained = True if opt.saved_params == '' else False
    net = get_model(model_name, pretrained=pretrained, root=opt.root).to(device)
    net.eval()

    # Load Images
    img = Image.open(opt.input_pic)

    # Transform
    # InceptionV3 expects 299x299 inputs; other models get resize-256 /
    # center-crop-224, then ImageNet normalization.
    transform_fn = transforms.Compose([
        transforms.Resize(256 if model_name.lower() != 'inceptionv3' else 299),
        transforms.CenterCrop(224 if model_name.lower() != 'inceptionv3' else 299),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])

    img = transform_fn(img).to(device)
    with torch.no_grad():
Ejemplo n.º 16
0
                        default='../configs/retina_resnet101_v1b_coco.yaml')
    parser.add_argument('--images', type=str, default=os.path.join(cur_path, '../png/biking.jpg'),
                        help='Test images.')

    args = parser.parse_args()
    return args


if __name__ == '__main__':
    args = parse_args()
    # Merge YAML config over defaults, then make it read-only.
    cfg.merge_from_file(args.config_file)
    cfg.freeze()
    device = torch.device(cfg.MODEL.device)
    device_cpu = torch.device('cpu')
    image = args.images
    net = get_model(cfg.CONFIG.model, pretrained=cfg.TEST.pretrained)
    net.to(device)
    net.eval()

    ax = None
    x, img = load_test(image, min_image_size=800)
    # Batch the image; 32 is presumably the size-divisibility padding for
    # the FPN strides — confirm in to_image_list.
    x = to_image_list(x, 32)
    x = x.to(device)
    with torch.no_grad():
        predictions = net(x)
    # Move results to CPU and rescale boxes back to the original image size.
    predictions = [o.to(device_cpu) for o in predictions]
    prediction = predictions[0]
    height, width = img.shape[:-1]
    prediction = prediction.resize((width, height))
    top_predictions = select_top_predictions(prediction, conf_thresh=cfg.TEST.vis_thresh)
Ejemplo n.º 17
0
    opt = parser.parse_args()
    return opt


if __name__ == '__main__':
    opt = parse_args()
    device = torch.device('cpu')
    if opt.cuda:
        device = torch.device('cuda')
    # Load Model
    model_name = opt.model
    # Use zoo weights only when no local parameter file was supplied.
    pretrained = True if opt.saved_params == '' else False
    model = get_model(model_name,
                      pretrained=pretrained,
                      pretrained_base=False,
                      aux=opt.aux,
                      dilated=opt.dilated,
                      jpu=opt.jpu,
                      root=opt.root).to(device)
    model.eval()

    # Load Images
    if opt.input_pic is None:
        # Pick a bundled demo image by the dataset suffix of the model name.
        img_map = {'voc': 'voc_example.jpg', 'citys': 'city_example.jpg'}
        opt.input_pic = os.path.join(
            cur_path, 'png/' + img_map[model_name.split('_')[-1]])
    img = Image.open(opt.input_pic)

    # Transform
    transform_fn = transforms.Compose([
        transforms.ToTensor(),
Ejemplo n.º 18
0
                        help='demo with GPU')
    parser.add_argument('--root', type=str, default=os.path.expanduser('~/.torch/models'),
                        help='Default pre-trained model root')
    parser.add_argument('--input-pic', type=str, default=os.path.join(cur_path, '../png/soccer.png'),
                        help='path to the input picture')
    opt = parser.parse_args()
    return opt


if __name__ == '__main__':
    opt = parse_args()
    device = torch.device('cpu')
    if opt.cuda:
        device = torch.device('cuda')

    # Person detector feeding the pose network; restrict it to the single
    # 'person' class while reusing that class's trained weights.
    detector = get_model(opt.detector, pretrained=True).to(device)
    detector.reset_class(["person"], reuse_weights=['person'])
    pose_net = get_model(opt.pose_model, pretrained=True).to(device)
    detector.eval()
    pose_net.eval()

    x, img = load_test(opt.input_pic, short=512)
    x = x.to(device)
    with torch.no_grad():
        # Detect people, crop/normalize each detection for the pose net,
        # then decode heatmaps back to image-space keypoint coordinates.
        class_IDs, scores, bounding_boxs = detector(x)
        pose_input, upscale_bbox = detector_to_simple_pose(img, class_IDs, scores, bounding_boxs, device=device)
        predicted_heatmap = pose_net(pose_input)
        pred_coords, confidence = heatmap_to_coord(predicted_heatmap, upscale_bbox)

    plot_keypoints(img, pred_coords, confidence, class_IDs, bounding_boxs, scores,
                   box_thresh=0.5, keypoint_thresh=0.2)
Ejemplo n.º 19
0

if __name__ == '__main__':
    # config
    # CIFAR-10 class list (index == class id).
    classes = 10
    class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer',
                   'dog', 'frog', 'horse', 'ship', 'truck']
    args = parse_args()
    device = torch.device('cpu')
    if args.cuda:
        device = torch.device('cuda:0')
    # Load Model
    model_name = args.model
    # Use zoo weights only when no local parameter file was supplied.
    pretrained = True if args.saved_params == '' else False
    kwargs = {'classes': classes, 'pretrained': pretrained, 'root': args.root, }
    net = get_model(model_name, **kwargs).to(device)
    net.eval()

    # Load Images
    img = Image.open(args.input_pic)

    # Transform
    # CIFAR-10 mean/std normalization at the native 32x32 resolution.
    transform_fn = transforms.Compose([
        transforms.Resize(32),
        transforms.CenterCrop(32),
        transforms.ToTensor(),
        transforms.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010])
    ])

    img = transform_fn(img).to(device)
    with torch.no_grad():
Ejemplo n.º 20
0
    parser.add_argument(
        '--thresh',
        type=float,
        default=0.5,
        help='Threshold of object score when visualize the bboxes.')
    args = parser.parse_args()
    return args


if __name__ == '__main__':
    args = parse_args()
    device = torch.device('cpu')
    if args.cuda:
        device = torch.device('cuda')
    image = args.images
    net = get_model(args.network, pretrained=True, root=args.root)
    net.to(device)
    # NMS: overlap threshold 0.45, keep at most 200 boxes.
    net.set_nms(0.45, 200)
    net.eval()

    ax = None
    x, img = load_test(image, short=512)
    x = x.to(device)
    with torch.no_grad():
        # Take batch element 0 of each output head (ids/scores/boxes).
        ids, scores, bboxes = [xx[0].cpu().numpy() for xx in net(x)]
    ax = plot_bbox(img,
                   bboxes,
                   scores,
                   ids,
                   thresh=args.thresh,
                   class_names=net.classes,
Ejemplo n.º 21
0
# Build a detection dataset from an .lst index file rooted at images_root.
dataset = gcv.data.LstDetection(LSTpath, root=images_root)
print(dataset)
image = dataset[0][0]
label = dataset[0][1]
print('label:', label)

# display image and label
# Label rows are [xmin, ymin, xmax, ymax, class_id, ...]: columns :4 are
# the box, column 4 the class index.
ax = viz.plot_bbox(image,
                   bboxes=label[:, :4],
                   labels=label[:, 4:5],
                   class_names=classes)
plt.savefig('labeled_image.jpg')

# initialize model
net, input_size = model_zoo.get_model(netName,
                                      pretrained=False,
                                      classes=classes)
if finetune_model == '':
    # No fine-tuned checkpoint given: start from fresh weights.
    net.initialize()
    net.reset_class(classes)
else:
    net.load_parameters(path_to_model)
    net.reset_class(classes)
print(net)

train_data = get_dataloader(net, dataset, input_size, batch_size, 0)

#############################################################################################
# Try use GPU for training
try:
Ejemplo n.º 22
0
    def __init__(self, args):
        """YOLOv3 trainer: builds the network, train/val dataloaders,
        optimizer and LR schedule from the parsed CLI ``args``."""
        self.device = torch.device(args.device)
        # network
        net_name = '_'.join(('yolo3', args.network, args.dataset))
        self.save_prefix = net_name
        self.net = get_model(net_name, pretrained_base=True)
        if args.distributed:
            self.net = torch.nn.SyncBatchNorm.convert_sync_batchnorm(self.net)
        if args.resume.strip():
            logger.info("Resume from the model {}".format(args.resume))
            self.net.load_state_dict(torch.load(args.resume.strip()))
        else:
            logger.info("Init from base net {}".format(args.network))
        classes, anchors = self.net.num_class, self.net.anchors
        self.net.set_nms(nms_thresh=0.45, nms_topk=400)
        if args.label_smooth:
            # Reaches into a private attribute of the target generator.
            self.net._target_generator._label_smooth = True
        self.net.to(self.device)
        if args.distributed:
            self.net = torch.nn.parallel.DistributedDataParallel(
                self.net, device_ids=[args.local_rank], output_device=args.local_rank)

        # dataset and dataloader
        train_dataset = get_train_data(args.dataset, args.mixup)
        width, height = args.data_shape, args.data_shape
        # Batchify: six stacked fixed-size targets plus one -1-padded
        # variable-length field.
        batchify_fn = Tuple(*([Stack() for _ in range(6)] + [Pad(axis=0, pad_val=-1) for _ in range(1)]))
        train_dataset = train_dataset.transform(
            YOLO3DefaultTrainTransform(width, height, classes, anchors, mixup=args.mixup))
        # Convert epoch counts to iteration counts for the iteration-based
        # sampler and the LR schedulers below.
        args.per_iter = len(train_dataset) // (args.num_gpus * args.batch_size)
        args.max_iter = args.epochs * args.per_iter
        if args.distributed:
            sampler = data.DistributedSampler(train_dataset)
        else:
            sampler = data.RandomSampler(train_dataset)
        train_sampler = data.sampler.BatchSampler(sampler=sampler, batch_size=args.batch_size,
                                                  drop_last=False)
        train_sampler = IterationBasedBatchSampler(train_sampler, num_iterations=args.max_iter)
        if args.no_random_shape:
            self.train_loader = data.DataLoader(train_dataset, batch_sampler=train_sampler, pin_memory=True,
                                                collate_fn=batchify_fn, num_workers=args.num_workers)
        else:
            # Multi-scale training: input sizes 320..608 in steps of 32.
            transform_fns = [YOLO3DefaultTrainTransform(x * 32, x * 32, classes, anchors, mixup=args.mixup)
                             for x in range(10, 20)]
            self.train_loader = RandomTransformDataLoader(transform_fns, train_dataset, batch_sampler=train_sampler,
                                                          collate_fn=batchify_fn, num_workers=args.num_workers)
        if args.eval_epoch > 0:
            # TODO: rewrite it
            val_dataset, self.metric = get_test_data(args.dataset)
            val_batchify_fn = Tuple(Stack(), Pad(pad_val=-1))
            val_dataset = val_dataset.transform(YOLO3DefaultValTransform(width, height))
            val_sampler = make_data_sampler(val_dataset, False, args.distributed)
            val_batch_sampler = data.BatchSampler(val_sampler, args.test_batch_size, False)
            self.val_loader = data.DataLoader(val_dataset, batch_sampler=val_batch_sampler,
                                              collate_fn=val_batchify_fn, num_workers=args.num_workers)

        # optimizer and lr scheduling
        self.optimizer = optim.SGD(self.net.parameters(), lr=args.lr, momentum=args.momentum,
                                   weight_decay=args.wd)
        if args.lr_mode == 'cos':
            self.scheduler = WarmupCosineLR(optimizer=self.optimizer, T_max=args.max_iter,
                                            warmup_factor=args.warmup_factor, warmup_iters=args.warmup_iters)
        elif args.lr_mode == 'step':
            lr_decay = float(args.lr_decay)
            # Decay epochs (comma-separated) converted to iteration milestones.
            milestones = sorted([float(ls) * args.per_iter for ls in args.lr_decay_epoch.split(',') if ls.strip()])
            self.scheduler = WarmupMultiStepLR(optimizer=self.optimizer, milestones=milestones, gamma=lr_decay,
                                               warmup_factor=args.warmup_factor, warmup_iters=args.warmup_iters)
        else:
            raise ValueError('illegal scheduler type')
        self.args = args
Ejemplo n.º 23
0
from torch import nn


def net_xavier_uniform_init(net):
    """Re-initialize every Conv2d in ``net`` in place: Xavier-uniform
    weights and zeroed biases.

    Parameters
    ----------
    net : nn.Module
        Module tree to walk; non-conv layers are left untouched.
    """
    for m in net.modules():
        if isinstance(m, nn.Conv2d):
            nn.init.xavier_uniform_(m.weight)
            # Bug fix: Conv2d created with bias=False has m.bias is None,
            # and the original unconditionally called zeros_ on it.
            if m.bias is not None:
                nn.init.zeros_(m.bias)


if __name__ == '__main__':
    from model.model_zoo import get_model
    net_name = 'ssd_512_mobilenet1.0_coco'
    # Smoke test: build an SSD model and re-initialize its conv layers.
    net = get_model(net_name, pretrained_base=True)
    net_xavier_uniform_init(net)