Code example #1
0
def test_resnext101_feats(benchmark, model, dev, normalize, dets, padding,
                          streams):
    """Benchmark ResNeXt-101 backbone feature extraction over detection crops.

    Builds random 720x1280 frames (one per stream), pads each detection box
    by the given fractional padding, crops every box from every frame,
    preprocesses the crops to 224x224, and benchmarks ``call_backbone`` on
    the stacked batch.

    Args:
        benchmark: pytest-benchmark fixture.
        model: backbone model fixture.
        dev: target device fixture.
        normalize: normalization transform fixture.
        dets: detection boxes as an ``(N, 4+)`` tensor of ``x1, y1, x2, y2``
            coordinates — presumably pixel coordinates; TODO confirm.
        padding: ``(padW, padH)`` fractional padding applied per box side.
        streams: number of random frames to crop from.
    """
    im_transform = transforms.Compose([
        transforms.ToPILImage(),
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        normalize,
    ])
    frames = th.rand((streams, 3, 720, 1280), dtype=th.float32)

    H, W = frames.shape[-2:]
    # BUG FIX: the original mutated the `dets` fixture tensor in place
    # (`dets[:, 0] -= paddingW`, ...), corrupting it for any other test or
    # benchmark round sharing the fixture. Work on a private copy instead.
    dets = dets.clone()
    # Box sizes use inclusive pixel coordinates, hence the +1.
    width = dets[:, 2] - dets[:, 0] + 1
    height = dets[:, 3] - dets[:, 1] + 1
    paddingW = width * padding[0]
    paddingH = height * padding[1]
    # Expand each box by the fractional padding on every side, then clip
    # back into the image so the crops below stay in bounds.
    dets[:, 0] -= paddingW
    dets[:, 1] -= paddingH
    dets[:, 2] += paddingW
    dets[:, 3] += paddingH
    dets = clip_boxes_to_image(dets, (H, W))

    # Crop every (rounded) box from every stream's frame and preprocess.
    batch = []
    for s in range(streams):
        for box in dets.round().long():
            x1, y1, x2, y2 = box.tolist()
            batch.append(im_transform(frames[s, :, y1:y2, x1:x2]))

    batch = th.stack(batch).to(dev)
    th.cuda.synchronize()
    feats = benchmark(call_backbone, model, batch)
    # One feature vector per crop.
    assert feats.shape[0] == batch.shape[0]
Code example #2
0
File: test_convnext.py  Project: necla-ml/ML-Vision
def test_convnext_small_spatial_feats(benchmark, model, normalize, dev,
                                      batch_size):
    """Benchmark ConvNeXt-small spatial features on a batch of random frames.

    Generates ``batch_size`` random 720x1280 frames, normalizes each and
    moves it to ``dev``, then benchmarks ``call_backbone_spatial`` on the
    stacked batch and checks the leading (batch) dimension of the output.
    """
    spatial_transform = transforms.Compose([normalize])
    # Normalize and transfer each random frame, then stack into one batch.
    batch = th.stack([
        spatial_transform(th.rand((3, 720, 1280), dtype=th.float32)).to(dev)
        for _ in range(batch_size)
    ])
    th.cuda.synchronize()
    spatial_feats = benchmark(call_backbone_spatial, model, batch)
    assert spatial_feats.shape[0] == batch_size
Code example #3
0
def transform(shape):
    """Build a resize-and-tensorize pipeline targeting ``shape[1:]`` (H, W)."""
    steps = [T.Resize(shape[1:]), T.ToTensor()]
    return T.Compose(steps)
Code example #4
0
def transform(shape):
    """Build a resize-and-tensorize pipeline targeting ``shape[1:]`` (H, W)."""
    # Local import keeps the heavy vision package off the module import path.
    import ml.vision.transforms as T
    steps = [T.Resize(shape[1:]), T.ToTensor()]
    return T.Compose(steps)