Example #1
def main(model, res=(512, ), pyramids=None, up_pyramid=False, max_depth=None):
    import torch
    import torch.nn.functional as F

    from hyperseg.utils.obj_factory import obj_factory
    from hyperseg.utils.utils import set_device
    from hyperseg.utils.img_utils import create_pyramid

    assert len(res) <= 2, f'res must be either a single number or a pair of numbers: "{res}"'
    res = res * 2 if len(res) == 1 else res

    device, gpus = set_device()
    model = obj_factory(model).to(device)

    x = torch.rand(1, 3, *res).to(device)
    x = create_pyramid(x, pyramids) if pyramids is not None else x
    if up_pyramid:
        x.append(
            F.interpolate(x[0],
                          scale_factor=2,
                          mode='bilinear',
                          align_corners=False))  # Upsample x2

    # Run profile; `profile` and `print_summary` are assumed to be defined
    # elsewhere in the surrounding script
    flops_summary, params_summary, meta_params_summary = profile(
        model, inputs=(x, ), max_depth=max_depth)
    print_summary(flops_summary, params_summary, meta_params_summary)
Example #2
def main(model="efficientnet_custom_02_scales.efficientnet('efficientnet-b0')", res=256):
    import torch
    from hyperseg.utils.obj_factory import obj_factory
    from hyperseg.utils.utils import set_device

    device, gpus = set_device()
    model = obj_factory(model).to(device)
    x = torch.rand(4, 3, res, res).to(device)
    pred = model(x)
    print(pred.__class__)
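These examples all instantiate their models through `obj_factory`, which accepts either a bare import path or a call expression with the arguments baked into the string (as in the default above). The helper below is only a minimal sketch of that pattern, assuming the spec splits into an import path and an optional argument list; it is not the actual hyperseg implementation, and `obj_factory_sketch` is a hypothetical name.

import importlib


def obj_factory_sketch(obj_spec, **extra_kwargs):
    """Instantiate an object from a spec string such as
    'package.module.attr' or "package.module.func('arg', key=1)"."""
    path, _, call_args = obj_spec.partition('(')
    module_name, attr_name = path.rsplit('.', 1)
    factory = getattr(importlib.import_module(module_name), attr_name)
    if call_args:  # the spec carried its own arguments, e.g. "'efficientnet-b0')"
        args, kwargs = eval(f'(lambda *a, **k: (a, k))({call_args}')
    else:
        args, kwargs = (), {}
    kwargs.update(extra_kwargs)
    return factory(*args, **kwargs)


# e.g. this would build torch.nn.Conv2d(3, 16, kernel_size=3):
conv = obj_factory_sketch('torch.nn.Conv2d(3, 16, kernel_size=3)')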
Example #3
def main(model='hyperseg.models.layers.meta_linear.MetaLinear',
         in_features=3,
         out_features=5):
    import torch

    from hyperseg.utils.obj_factory import obj_factory
    from hyperseg.utils.utils import set_device

    device, gpus = set_device()
    model = obj_factory(model,
                        in_features=in_features,
                        out_features=out_features).to(device)
    print(model)
    x = torch.rand(2, in_features).to(device)
    w = torch.ones(2, out_features * in_features).to(device)
    out = model(x, w)
    print(out.shape)
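`MetaLinear` is driven by an external weight tensor: `w` carries one flattened `out_features x in_features` weight matrix per sample, so each element of the batch gets its own linear map. Assuming the layer simply reshapes those weights and applies a batched matrix multiply with no bias, a minimal functional sketch (with the hypothetical name `meta_linear_sketch`) looks like this:

import torch


def meta_linear_sketch(x, w, out_features):
    """x: (B, in_features), w: (B, out_features * in_features) -> (B, out_features)."""
    batch_size, in_features = x.shape
    w = w.view(batch_size, out_features, in_features)  # one weight matrix per sample
    return torch.bmm(w, x.unsqueeze(-1)).squeeze(-1)


x = torch.rand(2, 3)
w = torch.ones(2, 5 * 3)
print(meta_linear_sketch(x, w, out_features=5).shape)  # torch.Size([2, 5])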
Example #4
def main(model='hyperseg.models.hyperseg_v0_1.hyperseg_efficientnet',
         res=(512, ),
         pyramids=None,
         train=False):
    import torch

    from hyperseg.utils.obj_factory import obj_factory
    from hyperseg.utils.utils import set_device
    from hyperseg.utils.img_utils import create_pyramid

    assert len(res) <= 2, f'res must be either a single number or a pair of numbers: "{res}"'
    res = res * 2 if len(res) == 1 else res

    device, gpus = set_device()
    model = obj_factory(model).to(device).train(train)
    x = torch.rand(2, 3, *res).to(device)
    x = create_pyramid(x, pyramids) if pyramids is not None else x
    pred = model(x)
    print(pred.shape)
Example #5
def main(model='hyperseg.models.hyperseg_v1_0.hypergen_efficientnet', res=(512,),
         pyramids=None,
         train=False):
    import torch

    from hyperseg.utils.obj_factory import obj_factory
    from hyperseg.utils.utils import set_device
    from hyperseg.utils.img_utils import create_pyramid
    from tqdm import tqdm

    assert len(res) <= 2, f'res must be either a single number or a pair of numbers: "{res}"'
    res = res * 2 if len(res) == 1 else res

    torch.set_grad_enabled(False)
    torch.backends.cudnn.benchmark = True
    device, gpus = set_device()
    model = obj_factory(model).to(device).train(train)
    x = torch.rand(1, 3, *res).to(device)
    x = create_pyramid(x, pyramids) if pyramids is not None else x
    pred = model(x)
    print(pred.shape)
Example #6
def main(model, res=(512, ), pyramids=None, max_depth=None):
    import torch

    from hyperseg.utils.obj_factory import obj_factory
    from hyperseg.utils.utils import set_device
    from hyperseg.utils.img_utils import create_pyramid

    assert len(res) <= 2, f'res must be either a single number or a pair of numbers: "{res}"'
    res = res * 2 if len(res) == 1 else res

    device, gpus = set_device()
    model = obj_factory(model).to(device)

    x = torch.rand(1, 3, *res).to(device)
    x = create_pyramid(x, pyramids) if pyramids is not None else x

    # Run profile
    flops_summary, params_summary = profile(model,
                                            inputs=(x, ),
                                            max_depth=max_depth)
    print_summary(flops_summary, params_summary)
Example #7
def main(model='hyperseg.models.layers.meta_conv.MetaConv2d(kernel_size=3)', in_channels=10, out_channels=20,
         padding=0, test_fps=False):
    import torch

    from hyperseg.utils.obj_factory import obj_factory
    from hyperseg.utils.utils import set_device
    import time
    from tqdm import tqdm

    torch.set_grad_enabled(False)
    torch.backends.cudnn.benchmark = True
    device, gpus = set_device()
    model = obj_factory(model, in_channels=in_channels, out_channels=out_channels).to(device)
    # MetaPatch is assumed to be imported at module level in the surrounding script
    patch_model = MetaPatch(model, padding=padding)

    x = torch.rand(2, in_channels, 256, 256).to(device)
    w = torch.ones(2, model.hyper_params, 8, 8).to(device)
    out = patch_model(x, w)
    print(out.shape)

    if test_fps:
        total_time = 0.
        total_iterations = 0
        pbar = tqdm(range(1000), unit='frames')
        for i in pbar:
            # Start measuring time
            torch.cuda.synchronize()
            start_time = time.perf_counter()

            out = patch_model(x[:1], w[:1])

            # Stop measuring time
            torch.cuda.synchronize()
            elapsed_time = time.perf_counter() - start_time
            total_time += elapsed_time
            total_iterations += out.shape[0]
            fps = total_iterations / total_time

            # Update progress bar info
            pbar.set_description(f'fps = {fps}')
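The timing loop above brackets the forward pass with `torch.cuda.synchronize()` because CUDA kernels launch asynchronously; without the synchronization, `time.perf_counter()` would largely measure the kernel launch rather than the actual GPU work. The same pattern can be factored into a small helper (a sketch, not part of hyperseg; `timed_forward` is a hypothetical name):

import time

import torch


def timed_forward(model, *inputs):
    """Run one forward pass and return (output, elapsed wall-clock seconds)."""
    if torch.cuda.is_available():
        torch.cuda.synchronize()  # wait for queued GPU work before starting the clock
    start = time.perf_counter()
    out = model(*inputs)
    if torch.cuda.is_available():
        torch.cuda.synchronize()  # make sure the forward pass has actually finished
    return out, time.perf_counter() - start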
Example #8
def main(
    # General arguments
    exp_dir,
    model=d('model'),
    gpus=d('gpus'),
    cpu_only=d('cpu_only'),
    workers=d('workers'),
    batch_size=d('batch_size'),
    arch=d('arch'),
    display_worst=d('display_worst'),
    display_best=d('display_best'),
    display_sources=d('display_sources'),
    display_with_input=d('display_with_input'),
    display_alpha=d('display_alpha'),
    display_background_index=d('display_background_index'),
    forced=d('forced'),

    # Data arguments
    test_dataset=d('test_dataset'),
    img_transforms=d('img_transforms'),
    tensor_transforms=d('tensor_transforms')):
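    # Note: `d(name)` is assumed to look up this script's default value for the
    # given command-line argument, and os, sys, np (numpy), torch, F, nn, tqdm,
    # DataLoader, Compose, ConfusionMatrix, jaccard, obj_factory, set_device,
    # load_model and display_subset are assumed to be module-level imports.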
    # Validation
    assert os.path.isdir(
        exp_dir), f'exp_dir "{exp_dir}" must be a path to a directory'
    model = 'model_best.pth' if model is None else model
    model = os.path.join(exp_dir,
                         model) if not os.path.isfile(model) else model
    assert os.path.isfile(model), f'model path "{model}" does not exist'

    # Initialize cache directory
    cache_dir = os.path.join(exp_dir,
                             os.path.splitext(os.path.basename(__file__))[0])
    scores_path = os.path.join(cache_dir, 'scores.npz')
    os.makedirs(cache_dir, exist_ok=True)

    # Initialize device
    torch.set_grad_enabled(False)
    torch.backends.cudnn.benchmark = True
    device, gpus = set_device(gpus, not cpu_only)

    # Load segmentation model
    model = load_model(model, 'segmentation', device, arch)

    # Support multiple GPUs
    if gpus and len(gpus) > 1:
        model = nn.DataParallel(model, gpus)

    # Initialize transforms
    img_transforms = obj_factory(
        img_transforms) if img_transforms is not None else []
    tensor_transforms = obj_factory(
        tensor_transforms) if tensor_transforms is not None else []
    test_transforms = Compose(img_transforms + tensor_transforms)

    # Initialize dataset
    test_dataset = obj_factory(test_dataset, transforms=test_transforms)
    test_loader = DataLoader(test_dataset,
                             batch_size=batch_size,
                             num_workers=workers,
                             pin_memory=True,
                             drop_last=False,
                             shuffle=False)

    # Initialize metric
    num_classes = len(test_dataset.classes)
    confmat = ConfusionMatrix(num_classes=num_classes)

    if forced or not os.path.isfile(scores_path):
        # For each batch of frames in the input video
        ious = []
        for i, (input, target) in enumerate(
                tqdm(test_loader, unit='batches', file=sys.stdout)):
            # Prepare input
            if isinstance(input, (list, tuple)):
                for j in range(len(input)):
                    input[j] = input[j].to(device)
            else:
                input = input.to(device)
            target = target.to(device)

            # Execute model
            pred = model(input)
            # Make sure the prediction and target are of the same resolution
            if pred.shape[2:] != target.shape[1:]:
                pred = F.interpolate(pred,
                                     size=target.shape[1:],
                                     mode='bilinear')

            # Update confusion matrix
            confmat.update(
                target.flatten(),
                pred.argmax(1).flatten()
                if pred.dim() == 4 else pred.flatten())

            # Calculate IoU scores
            for b in range(target.shape[0]):
                ious.append(
                    jaccard(target[b].unsqueeze(0), pred[b].unsqueeze(0),
                            num_classes, 0).item())
        # Save metrics to file
        ious = np.array(ious)
        global_acc, class_acc, class_iou = confmat.compute()
        global_acc = global_acc.item()
        class_acc = class_acc.cpu().numpy()
        class_iou = class_iou.cpu().numpy()
        np.savez(scores_path,
                 ious=ious,
                 global_acc=global_acc,
                 class_acc=class_acc,
                 class_iou=class_iou)
    else:  # Load metrics from file
        scores_archive = np.load(scores_path)
        ious = scores_archive['ious']
        global_acc = scores_archive['global_acc']
        class_acc = scores_archive['class_acc']
        class_iou = scores_archive['class_iou']

    # Print results
    print(f'global_acc={global_acc}')
    print(f'class_acc={class_acc}')
    print(f'class_iou={class_iou}')
    print(f'mIoU={np.mean(class_iou)}')

    # Display edge predictions
    indices = np.argsort(ious)
    if display_worst:
        print('Displaying worst predictions...')
        display_subset(test_dataset,
                       indices[:display_worst],
                       model,
                       device,
                       batch_size,
                       scale=0.5,
                       alpha=display_alpha,
                       with_input=display_with_input,
                       display_sources=display_sources,
                       ignore_index=display_background_index)
    if display_best:
        print('Displaying best predictions...')
        display_subset(test_dataset,
                       indices[-display_best:],
                       model,
                       device,
                       batch_size,
                       scale=0.5,
                       alpha=display_alpha,
                       with_input=display_with_input,
                       display_sources=display_sources,
                       ignore_index=display_background_index)
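The printed metrics all derive from the confusion matrix: global accuracy is the trace divided by the total count, per-class accuracy divides each diagonal entry by its row sum, and per-class IoU divides it by the union of row and column sums. Below is a minimal sketch of such a `compute()` step, assuming `mat[i, j]` counts pixels with ground truth class `i` predicted as class `j`; the actual `ConfusionMatrix` class may differ.

import torch


def compute_metrics(mat):
    """mat[i, j] counts pixels with ground truth class i predicted as class j."""
    mat = mat.float()
    diag = torch.diag(mat)
    global_acc = diag.sum() / mat.sum()                          # overall pixel accuracy
    class_acc = diag / mat.sum(dim=1)                            # per-class recall
    class_iou = diag / (mat.sum(dim=1) + mat.sum(dim=0) - diag)  # per-class IoU
    return global_acc, class_acc, class_iou                      # mIoU is class_iou.mean()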
Example #9
def main(
    # General arguments
    exp_dir,
    model=d('model'),
    gpus=d('gpus'),
    cpu_only=d('cpu_only'),
    workers=d('workers'),
    batch_size=d('batch_size'),
    arch=d('arch'),
    display_worst=d('display_worst'),
    display_best=d('display_best'),
    display_sources=d('display_sources'),
    forced=d('forced'),
    trace=d('trace'),
    iterations=d('iterations'),

    # Data arguments
    test_dataset=d('test_dataset'),
    img_transforms=d('img_transforms'),
    tensor_transforms=d('tensor_transforms')):
    # Validation
    assert os.path.isdir(
        exp_dir), f'exp_dir "{exp_dir}" must be a path to a directory'
    model = 'model_best.pth' if model is None else model
    model = os.path.join(exp_dir,
                         model) if not os.path.isfile(model) else model
    if not os.path.isfile(model):
        model = None

    # Initialize cache directory
    cache_dir = os.path.join(exp_dir,
                             os.path.splitext(os.path.basename(__file__))[0])
    scores_path = os.path.join(cache_dir, 'scores.npz')
    os.makedirs(cache_dir, exist_ok=True)

    # Initialize device
    torch.set_grad_enabled(False)
    torch.backends.cudnn.benchmark = True
    device, gpus = set_device(gpus, not cpu_only)

    # Initialize transforms
    img_transforms = obj_factory(
        img_transforms) if img_transforms is not None else []
    tensor_transforms = obj_factory(
        tensor_transforms) if tensor_transforms is not None else []
    test_transforms = Compose(img_transforms + tensor_transforms)

    # Initialize dataset
    test_dataset = obj_factory(test_dataset, transforms=test_transforms)
    test_sampler = None if iterations is None else RandomSampler(
        test_dataset, True, iterations)
    test_loader = DataLoader(test_dataset,
                             batch_size=batch_size,
                             num_workers=workers,
                             pin_memory=True,
                             drop_last=False,
                             shuffle=False,
                             sampler=test_sampler)

    # Load segmentation model
    if model is None:
        assert arch is not None
        model = obj_factory(arch).to(device)
    else:
        model = load_model(model, 'segmentation', device, arch)

    # Remove BN
    model = remove_bn(model)

    # Trace model
    if trace:
        sample, target = test_dataset[0]
        model = torch.jit.trace(model, sample.unsqueeze(0).to(device))

    # Support multiple GPUs
    if gpus and len(gpus) > 1:
        model = nn.DataParallel(model, gpus)

    # Initialize metric
    num_classes = len(test_dataset.classes)
    confmat = ConfusionMatrix(num_classes=num_classes)

    if forced or not os.path.isfile(scores_path):
        # Two passes over the data; the first presumably serves as warm-up for the fps measurement
        for j in range(2):
            # For each batch of frames in the input video
            ious = []
            total_time = 0.
            total_iterations = 0
            pbar = tqdm(test_loader, unit='batches', file=sys.stdout)
            for i, (input, target) in enumerate(pbar):
                target = target.to(device)

                # Start measuring time
                torch.cuda.synchronize()
                start_time = time.perf_counter()

                # Prepare input
                if isinstance(input, (list, tuple)):
                    for k in range(len(input)):  # avoid shadowing the outer loop variable j
                        input[k] = input[k].to(device)
                else:
                    input = input.to(device)

                # Execute model
                pred = model(input)

                # Stop measuring time
                torch.cuda.synchronize()
                elapsed_time = time.perf_counter() - start_time
                total_time += elapsed_time
                total_iterations += pred.shape[0]
                fps = total_iterations / total_time

                # Update confusion matrix
                confmat.update(
                    target.flatten(),
                    pred.argmax(1).flatten()
                    if pred.dim() == 4 else pred.flatten())

                # Calculate IoU scores
                for b in range(target.shape[0]):
                    ious.append(
                        jaccard(target[b].unsqueeze(0), pred[b].unsqueeze(0),
                                num_classes, 0).item())

                # Update progress bar info
                pbar.set_description(f'fps = {fps}')

        # Save metrics to file
        ious = np.array(ious)
        global_acc, class_acc, class_iou = confmat.compute()
        global_acc = global_acc.item()
        class_acc = class_acc.cpu().numpy()
        class_iou = class_iou.cpu().numpy()
        fps = len(test_loader) / total_time
        np.savez(scores_path,
                 ious=ious,
                 global_acc=global_acc,
                 class_acc=class_acc,
                 class_iou=class_iou,
                 fps=fps)
    else:  # Load metrics from file
        scores_archive = np.load(scores_path)
        ious = scores_archive['ious']
        global_acc = scores_archive['global_acc']
        class_acc = scores_archive['class_acc']
        class_iou = scores_archive['class_iou']
        fps = scores_archive['fps']

    # Print results
    print(f'global_acc={global_acc}')
    print(f'class_acc={class_acc}')
    print(f'class_iou={class_iou}')
    print(f'mIoU={np.mean(class_iou)}')
    print(f'fps={fps}')

    # Display edge predictions
    indices = np.argsort(ious)
    if display_worst:
        print('Displaying worst predictions...')
        display_subset(test_dataset,
                       indices[:display_worst],
                       model,
                       device,
                       batch_size,
                       scale=0.5,
                       alpha=0.5,
                       with_input=False,
                       display_sources=display_sources)
    if display_best:
        print('Displaying best predictions...')
        display_subset(test_dataset,
                       indices[-display_best:],
                       model,
                       device,
                       batch_size,
                       scale=0.5,
                       alpha=0.5,
                       with_input=False,
                       display_sources=display_sources)