Example #1
from collections import OrderedDict

from megengine.module import (
    BatchNorm1d,
    BatchNorm2d,
    Conv2d,
    Dropout,
    Linear,
    MaxPool2d,
    Module,
    Sequential,
    Softmax,
)


class MyModule(Module):  # placeholder name; the class header is not part of the original snippet
    def __init__(self):
        super().__init__()
        self.conv1 = Conv2d(3, 128, 3, padding=1, bias=False)
        self.conv2 = Conv2d(3, 128, 3, dilation=2, bias=False)
        self.bn1 = BatchNorm1d(128)
        self.bn2 = BatchNorm2d(128)
        self.pooling = MaxPool2d(kernel_size=2, padding=0)
        # depthwise (groups == channels) conv followed by a 1x1 pointwise conv
        modules = OrderedDict()
        modules["depthwise"] = Conv2d(
            256,
            256,
            3,
            1,
            0,
            groups=256,
            bias=False,
        )
        modules["pointwise"] = Conv2d(
            256,
            256,
            kernel_size=1,
            stride=1,
            padding=0,
            bias=True,
        )
        self.submodule1 = Sequential(modules)
        # modules nested inside plain Python containers are registered as well
        self.list1 = [Dropout(drop_prob=0.1), [Softmax(axis=100)]]
        self.tuple1 = (
            Dropout(drop_prob=0.1),
            (Softmax(axis=100), Dropout(drop_prob=0.2)),
        )
        self.dict1 = {"Dropout": Dropout(drop_prob=0.1)}
        self.fc1 = Linear(512, 1024)
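A minimal usage sketch for the module above, assuming the placeholder class name MyModule: MegEngine's module traversal also descends into the plain list, tuple and dict attributes, so every layer registered in __init__ should show up.

net = MyModule()
for name, mod in net.named_modules():
    print(name, type(mod).__name__)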
Example #2
import megengine.autodiff as ad
import megengine.distributed as dist
import megengine.functional as F
import megengine.optimizer as optimizer
from megengine.jit import trace
from megengine.module import Conv2d, Sequential, SyncBatchNorm


def run_syncbn(trace_mode):
    x = F.ones([2, 16, 4, 4], dtype="float32")

    net = Sequential(
        Conv2d(16, 16, 1), SyncBatchNorm(16), Conv2d(16, 16, 1), SyncBatchNorm(16),
    )

    gm = ad.GradManager().attach(
        net.parameters(), callbacks=dist.make_allreduce_cb("MEAN")
    )
    opt = optimizer.SGD(net.parameters(), 1e-3)

    def train_func(x):
        with gm:
            y = net(x)
            loss = y.mean()
            gm.backward(loss)
            opt.step().clear_grad()
        return loss

    if trace_mode is not None:
        train_func = trace(train_func, symbolic=trace_mode)

    for _ in range(3):
        loss = train_func(x)
        loss.numpy()
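SyncBatchNorm and the "MEAN" all-reduce callback both require an initialized process group, so run_syncbn is presumably driven from a distributed launcher. A hedged sketch of such a driver (the worker count is illustrative):

import megengine.distributed as dist

@dist.launcher(n_gpus=2)
def worker():
    run_syncbn(None)   # eager
    run_syncbn(True)   # traced, symbolic
    run_syncbn(False)  # traced, imperative

worker()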
Example #3
 def __init__(self):
     super().__init__()
     self.conv1 = Conv2d(3, 128, 3, stride=2, bias=False)
     self.conv2 = Conv2d(3, 128, 3, padding=1, bias=False)
     self.conv3 = Conv2d(3, 128, 3, dilation=2, bias=False)
     self.bn1 = BatchNorm2d(128)
     self.bn2 = BatchNorm1d(128)
     self.dropout = Dropout(drop_prob=0.1)
     self.softmax = Softmax(axis=100)
     self.pooling = MaxPool2d(kernel_size=2, padding=0)
     self.submodule1 = Sequential(Dropout(drop_prob=0.1), Softmax(axis=100))
     self.fc1 = Linear(512, 1024)
Example #4
 def __init__(self, has_bn=False):
     super().__init__()
     self.conv0 = Conv2d(1, 20, kernel_size=5, bias=True)
     self.pool0 = AvgPool2d(2)
     self.conv1 = Conv2d(20, 20, kernel_size=5, bias=True)
     self.pool1 = AvgPool2d(2)
     self.fc0 = Linear(20 * 4 * 4, 500, bias=True)
     self.fc1 = Linear(500, 10, bias=True)
     self.bn0 = None
     self.bn1 = None
     if has_bn:
         self.bn0 = BatchNorm2d(20)
         self.bn1 = BatchNorm2d(20)
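The layer sizes above only line up for 1x28x28 inputs (28 → 24 → 12 → 8 → 4 through the two conv/pool stages, matching the 20 * 4 * 4 flatten in fc0). A sketch of the forward pass those layers imply, assuming megengine.functional is imported as F; this method is not part of the original snippet:

 def forward(self, x):
     x = self.conv0(x)              # 1x28x28 -> 20x24x24
     if self.bn0 is not None:
         x = self.bn0(x)
     x = self.pool0(F.relu(x))      # -> 20x12x12
     x = self.conv1(x)              # -> 20x8x8
     if self.bn1 is not None:
         x = self.bn1(x)
     x = self.pool1(F.relu(x))      # -> 20x4x4
     x = F.flatten(x, 1)            # -> 320
     x = F.relu(self.fc0(x))        # -> 500
     return self.fc1(x)             # -> 10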
Example #5
 def __init__(self, groups, bias):
     super().__init__()
     self.quant = QuantStub()
     self.dequant = DequantStub()
     # in_channels, out_channels and kernel_size are free variables here,
     # resolved from the enclosing (test) scope rather than passed as arguments
     self.conv = Conv2d(
         in_channels, out_channels, kernel_size, groups=groups, bias=bias
     )
     # note the swapped channel order: this conv maps back from out_channels to in_channels
     self.conv_relu = ConvRelu2d(
         out_channels, in_channels, kernel_size, groups=groups, bias=bias
     )
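A sketch of the quantization-aware-training flow the QuantStub/DequantStub pair is designed for, assuming the snippet's class is named Net and its forward routes input through quant → convs → dequant (names here are illustrative):

from megengine.quantization import quantize_qat

net = Net(groups=1, bias=True)
quantize_qat(net)  # swaps float modules for their QAT counterparts in place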
Example #6
import pytest

from megengine.module import Conv1d, Conv2d, Conv3d, Linear
from megengine.module.init import calculate_fan_in_and_fan_out


def test_calculate_fan_in_and_fan_out():
    l = Linear(in_features=3, out_features=8)
    fanin, fanout = calculate_fan_in_and_fan_out(l.weight)
    assert fanin == 3
    assert fanout == 8

    with pytest.raises(ValueError):
        calculate_fan_in_and_fan_out(l.bias)

    l = Conv1d(in_channels=2, out_channels=3, kernel_size=5)
    fanin, fanout = calculate_fan_in_and_fan_out(l.weight)
    assert fanin == 2 * 5
    assert fanout == 3 * 5

    # FIXME: will be wrong for group conv1d
    # l = Conv1d(in_channels=2, out_channels=4, kernel_size=5, groups=2)
    # fanin, fanout = calculate_fan_in_and_fan_out(l.weight)
    # assert fanin == 2 // 2 * 5
    # assert fanout == 4 // 2 * 5

    l = Conv2d(in_channels=2, out_channels=3, kernel_size=(5, 7))
    fanin, fanout = calculate_fan_in_and_fan_out(l.weight)
    assert fanin == 2 * 5 * 7
    assert fanout == 3 * 5 * 7

    l = Conv2d(in_channels=2, out_channels=4, kernel_size=(5, 7), groups=2)
    fanin, fanout = calculate_fan_in_and_fan_out(l.weight)
    assert fanin == 2 // 2 * 5 * 7
    assert fanout == 4 // 2 * 5 * 7

    # FIXME: will be wrong for conv3d
    # l = Conv3d(in_channels=2, out_channels=3, kernel_size=(5, 7, 9))
    # fanin, fanout = calculate_fan_in_and_fan_out(l.weight)
    # assert fanin == 2 * 5 * 7 * 9
    # assert fanout == 3 * 5 * 7 * 9

    l = Conv3d(in_channels=2, out_channels=4, kernel_size=(5, 7, 9), groups=2)
    fanin, fanout = calculate_fan_in_and_fan_out(l.weight)
    assert fanin == 2 // 2 * 5 * 7 * 9
    assert fanout == 4 // 2 * 5 * 7 * 9
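For reference, the rule these assertions encode, written out as a standalone check (a sketch of the expected values, not MegEngine's implementation):

import numpy as np

def expected_fans(in_channels, out_channels, kernel_size, groups=1):
    # channels per group times the receptive-field size
    receptive = int(np.prod(kernel_size))
    return in_channels // groups * receptive, out_channels // groups * receptive

assert expected_fans(3, 8, ()) == (3, 8)                            # Linear: empty kernel
assert expected_fans(2, 3, (5, 7)) == (2 * 5 * 7, 3 * 5 * 7)        # Conv2d
assert expected_fans(2, 4, (5, 7), groups=2) == (5 * 7, 2 * 5 * 7)  # grouped Conv2d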
Example #7
def test_calculate_fan_in_and_fan_out():
    l = Linear(in_features=3, out_features=8)
    fanin, fanout = calculate_fan_in_and_fan_out(l.weight)
    assert fanin == 3
    assert fanout == 8

    with pytest.raises(ValueError):
        calculate_fan_in_and_fan_out(l.bias)

    l = Conv2d(in_channels=2, out_channels=3, kernel_size=(5, 7))
    fanin, fanout = calculate_fan_in_and_fan_out(l.weight)
    assert fanin == 2 * 5 * 7
    assert fanout == 3 * 5 * 7
Example #8
 def __init__(self):
     super().__init__()
     self.conv1 = Conv2d(3, 128, 3, bias=False)
     self.softmax = Softmax(100)
Example #9
 def __init__(self):
     super().__init__()
     self.conv1 = Conv2d(3, 128, 3, bias=False)
     # conv2 and conv3 are aliases of conv1: all three attributes refer to
     # the same Conv2d instance, so its parameters are shared
     self.conv2 = self.conv1
     self.conv3 = self.conv1
Example #10
 def __init__(self):
     super().__init__()
     self.conv1 = Conv2d(3, 128, 3, bias=False)
     self.conv2 = Conv2d(3, 128, 3, padding=1, bias=False)
     # rebinding self.conv1 replaces the module registered two lines above;
     # the final network holds only the dilated 256-channel conv as conv1
     self.conv1 = Conv2d(3, 256, 3, dilation=2, bias=False)
Example #11
 def __init__(self):
     super().__init__()
     self.conv0 = Conv2d(1, 1, kernel_size=3, bias=False)
     self.conv1 = Conv2d(1, 1, kernel_size=3, bias=False)
     # assigning conv0's weight to conv1 makes the two convolutions share
     # a single weight Parameter
     self.conv1.weight = self.conv0.weight
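A quick check of the sharing above, assuming the snippet sits in a Module subclass, called Net here for illustration:

net = Net()
assert net.conv1.weight is net.conv0.weight

# with a shared Parameter, parameter listings should deduplicate the weight
print(len(list(net.parameters())))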