Example #1
0
def test_point_head():
    """Test PointHead.forward_test and losses() with both a single decode
    loss and multiple decode losses."""

    inputs = [torch.randn(1, 32, 45, 45)]
    point_head = PointHead(
        in_channels=[32], in_index=[0], channels=16, num_classes=19)
    assert len(point_head.fcs) == 3
    # FCN head provides the coarse prediction that PointHead refines.
    fcn_head = FCNHead(in_channels=32, channels=16, num_classes=19)
    if torch.cuda.is_available():
        head, inputs = to_cuda(point_head, inputs)
        head, inputs = to_cuda(fcn_head, inputs)
    prev_output = fcn_head(inputs)
    # NOTE(review): 8196 looks like a typo for 8192, but it is kept as-is
    # since only the output shape is checked here.
    test_cfg = ConfigDict(
        subdivision_steps=2, subdivision_num_points=8196, scale_factor=2)
    output = point_head.forward_test(inputs, prev_output, None, test_cfg)
    # 45 * scale_factor**subdivision_steps = 180
    assert output.shape == (1, point_head.num_classes, 180, 180)

    # test multiple losses case
    inputs = [torch.randn(1, 32, 45, 45)]
    point_head_multiple_losses = PointHead(
        in_channels=[32],
        in_index=[0],
        channels=16,
        num_classes=19,
        loss_decode=[
            dict(type='CrossEntropyLoss', loss_name='loss_1'),
            dict(type='CrossEntropyLoss', loss_name='loss_2')
        ])
    assert len(point_head_multiple_losses.fcs) == 3
    fcn_head_multiple_losses = FCNHead(
        in_channels=32,
        channels=16,
        num_classes=19,
        loss_decode=[
            dict(type='CrossEntropyLoss', loss_name='loss_1'),
            dict(type='CrossEntropyLoss', loss_name='loss_2')
        ])
    if torch.cuda.is_available():
        head, inputs = to_cuda(point_head_multiple_losses, inputs)
        head, inputs = to_cuda(fcn_head_multiple_losses, inputs)
    prev_output = fcn_head_multiple_losses(inputs)
    test_cfg = ConfigDict(
        subdivision_steps=2, subdivision_num_points=8196, scale_factor=2)
    output = point_head_multiple_losses.forward_test(inputs, prev_output, None,
                                                     test_cfg)
    # Fix: assert against the head that actually produced this output,
    # not the unrelated single-loss head above (the original only passed
    # because both heads happen to share num_classes=19).
    assert output.shape == (1, point_head_multiple_losses.num_classes, 180,
                            180)

    fake_label = torch.ones([1, 180, 180], dtype=torch.long)

    if torch.cuda.is_available():
        fake_label = fake_label.cuda()
    loss = point_head_multiple_losses.losses(output, fake_label)
    # PointHead prefixes each configured loss_name with 'point'.
    assert 'pointloss_1' in loss
    assert 'pointloss_2' in loss
Example #2
0
def _context_for_ohem_multiple_loss():
    """Build an FCNHead context head configured with two cross-entropy
    decode losses, for OHEM-related tests."""
    decode_losses = [
        dict(type='CrossEntropyLoss', loss_name='loss_1'),
        dict(type='CrossEntropyLoss', loss_name='loss_2'),
    ]
    return FCNHead(
        in_channels=32,
        channels=16,
        num_classes=19,
        loss_decode=decode_losses)
Example #3
0
def test_ocr_head():
    """Check OCRHead's output shape given an FCN coarse prediction."""
    feats = [torch.randn(1, 8, 23, 23)]
    ocr_head = OCRHead(
        in_channels=8, channels=4, num_classes=19, ocr_channels=8)
    fcn_head = FCNHead(in_channels=8, channels=4, num_classes=19)
    if torch.cuda.is_available():
        _, feats = to_cuda(ocr_head, feats)
        _, feats = to_cuda(fcn_head, feats)
    # OCRHead takes both the backbone features and a coarse prediction.
    coarse = fcn_head(feats)
    refined = ocr_head(feats, coarse)
    assert refined.shape == (1, ocr_head.num_classes, 23, 23)
Example #4
0
def test_point_head():
    """Smoke-test PointHead.forward_test on a single-scale input."""
    feats = [torch.randn(1, 32, 45, 45)]
    point_head = PointHead(
        in_channels=[32], in_index=[0], channels=16, num_classes=19)
    assert len(point_head.fcs) == 3
    # The FCN head supplies the coarse prediction that PointHead refines.
    fcn_head = FCNHead(in_channels=32, channels=16, num_classes=19)
    if torch.cuda.is_available():
        _, feats = to_cuda(point_head, feats)
        _, feats = to_cuda(fcn_head, feats)
    coarse = fcn_head(feats)
    cfg = ConfigDict(
        subdivision_steps=2, subdivision_num_points=8196, scale_factor=2)
    refined = point_head.forward_test(feats, coarse, None, cfg)
    # 45 upsampled by scale_factor**subdivision_steps -> 180
    assert refined.shape == (1, point_head.num_classes, 180, 180)
Example #5
0
def _context_for_ohem():
    """Build a default FCNHead used as the context head in OHEM tests."""
    return FCNHead(num_classes=19, in_channels=32, channels=16)
def test_fcn_head():
    """Exercise FCNHead construction options and forward output shapes."""

    # num_convs must be not less than 0
    with pytest.raises(AssertionError):
        FCNHead(num_classes=19, num_convs=-1)

    # without norm_cfg, no ConvModule should carry a norm layer
    fcn = FCNHead(in_channels=32, channels=16, num_classes=19)
    for mod in fcn.modules():
        if isinstance(mod, ConvModule):
            assert not mod.with_norm

    # with norm_cfg, every ConvModule gets a SyncBatchNorm
    fcn = FCNHead(
        in_channels=32,
        channels=16,
        num_classes=19,
        norm_cfg=dict(type='SyncBN'))
    for mod in fcn.modules():
        if isinstance(mod, ConvModule):
            assert mod.with_norm and isinstance(mod.bn, SyncBatchNorm)

    # concat_input=False: no conv_cat module is created
    feats = [torch.randn(1, 32, 45, 45)]
    fcn = FCNHead(
        in_channels=32, channels=16, num_classes=19, concat_input=False)
    if torch.cuda.is_available():
        fcn, feats = to_cuda(fcn, feats)
    assert len(fcn.convs) == 2
    assert not fcn.concat_input and not hasattr(fcn, 'conv_cat')
    out = fcn(feats)
    assert out.shape == (1, fcn.num_classes, 45, 45)

    # concat_input=True: conv_cat fuses input (32) + conv output (16)
    feats = [torch.randn(1, 32, 45, 45)]
    fcn = FCNHead(
        in_channels=32, channels=16, num_classes=19, concat_input=True)
    if torch.cuda.is_available():
        fcn, feats = to_cuda(fcn, feats)
    assert len(fcn.convs) == 2
    assert fcn.concat_input
    assert fcn.conv_cat.in_channels == 48
    out = fcn(feats)
    assert out.shape == (1, fcn.num_classes, 45, 45)

    # default kernel_size is 3 with padding 1
    feats = [torch.randn(1, 32, 45, 45)]
    fcn = FCNHead(in_channels=32, channels=16, num_classes=19)
    if torch.cuda.is_available():
        fcn, feats = to_cuda(fcn, feats)
    for conv in fcn.convs:
        assert conv.kernel_size == (3, 3)
        assert conv.padding == 1
    out = fcn(feats)
    assert out.shape == (1, fcn.num_classes, 45, 45)

    # kernel_size=1 implies padding 0
    feats = [torch.randn(1, 32, 45, 45)]
    fcn = FCNHead(in_channels=32, channels=16, num_classes=19, kernel_size=1)
    if torch.cuda.is_available():
        fcn, feats = to_cuda(fcn, feats)
    for conv in fcn.convs:
        assert conv.kernel_size == (1, 1)
        assert conv.padding == 0
    out = fcn(feats)
    assert out.shape == (1, fcn.num_classes, 45, 45)

    # num_convs=1 yields a single conv layer
    feats = [torch.randn(1, 32, 45, 45)]
    fcn = FCNHead(in_channels=32, channels=16, num_classes=19, num_convs=1)
    if torch.cuda.is_available():
        fcn, feats = to_cuda(fcn, feats)
    assert len(fcn.convs) == 1
    out = fcn(feats)
    assert out.shape == (1, fcn.num_classes, 45, 45)

    # num_convs=0 degrades convs to an identity mapping
    feats = [torch.randn(1, 32, 45, 45)]
    fcn = FCNHead(
        in_channels=32,
        channels=32,
        num_classes=19,
        num_convs=0,
        concat_input=False)
    if torch.cuda.is_available():
        fcn, feats = to_cuda(fcn, feats)
    assert isinstance(fcn.convs, torch.nn.Identity)
    out = fcn(feats)
    assert out.shape == (1, fcn.num_classes, 45, 45)