Example 1
def corr2d_multi_in(X, K):
    """多个输入通道的互相关运算
    """

    res = d2l.corr2d(X[0, :, :], K[0, :, :])
    for i in range(1, X.shape[0]):
        res += d2l.corr2d(X[i, :, :], K[i, :, :])
    return res
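A minimal usage sketch, not part of the original example: it assumes d2l.corr2d is the standard 2D cross-correlation helper used throughout these snippets, and the tensors follow the usual two-channel toy values; the import line is an assumption about how the d2l package is laid out.

import torch
from d2l import torch as d2l  # assumption: the d2l helper package that provides corr2d

# Two input channels, with one 2x2 kernel per channel
X = torch.tensor([[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]],
                  [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]])
K = torch.tensor([[[0.0, 1.0], [2.0, 3.0]],
                  [[1.0, 2.0], [3.0, 4.0]]])

print(corr2d_multi_in(X, K))
# Channel 0 contributes [[19, 25], [37, 43]] and channel 1 contributes [[37, 47], [67, 77]],
# so the summed result is [[56, 72], [104, 120]]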
Example 2
def main():
    # 5.1.1 2D cross-correlation
    X = torch.tensor([[0, 1, 2], [3, 4, 5], [6, 7, 8]])
    K = torch.tensor([[0, 1], [2, 3]])
    print(d2l.corr2d(X, K))
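    # Expected output, assuming d2l.corr2d is the standard 2D cross-correlation:
    # [[19, 25], [37, 43]]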

    # 5.1.2 2D convolutional layer
    layer = Conv2D(kernel_size=(2, 2))
    output = layer(X)
    print(output.shape)
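    # A (2, 2) kernel sliding over a (3, 3) input yields an output of shape
    # (3 - 2 + 1, 3 - 2 + 1) = torch.Size([2, 2])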

    # 5.1.3 Edge detection of objects in an image
    X = torch.ones(6, 8)
    X[:, 2:6] = 0

    K = torch.tensor([[1, -1]])
    Y = d2l.corr2d(X, K)
    print(Y)
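    # Y has shape (6, 7): each row is [0, 1, 0, 0, 0, -1, 0], so 1 marks the 1->0 edge
    # (column 1), -1 marks the 0->1 edge (column 5), and 0 means no edge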

    # 5.1.4 Learning the kernel array from data
    conv2d = Conv2D(kernel_size=(1, 2))
    step = 20
    lr = 0.01

    for i in range(step):
        Y_hat = conv2d(X)
        loss = ((Y_hat - Y)**2).sum()
        loss.backward()
        conv2d.weight.data -= lr * conv2d.weight.grad
        conv2d.bias.data -= lr * conv2d.bias.grad
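        # Manually zero the gradients; otherwise backward() would accumulate them
        # across iterations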
        conv2d.weight.grad.fill_(0)
        conv2d.bias.grad.fill_(0)
        if (i + 1) % 5 == 0:
            print('Step %d, loss %.3f' % (i + 1, loss.item()))

    print(conv2d.weight.data, conv2d.bias.data)
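    # The learned kernel should end up close to [[1, -1]] and the bias close to 0,
    # since Y was generated from X with exactly that kernel and no bias.
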
def corr2d_multi_in(X, K):
    # Cross-correlate along dimension 0 (the channel dimension) of X and K, then sum the results
    res = d2l.corr2d(X[0, :, :], K[0, :, :])
    for i in range(1, X.shape[0]):
        res += d2l.corr2d(X[i, :, :], K[i, :, :])
    return res
# 5.1.2 2D convolutional layer
class Conv2D(nn.Module):
    def __init__(self, kernel_size):
        super(Conv2D, self).__init__()
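        # Register the kernel and the scalar bias as learnable parameters,
        # so autograd tracks their gradients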
        self.weight = nn.Parameter(torch.randn(kernel_size))
        self.bias = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return d2l.corr2d(x, self.weight) + self.bias


# 5.1.3
X = torch.ones(6, 8)
X[:, 2:6] = 0
K = torch.tensor([[1, -1]])
Y = d2l.corr2d(X, K)
# print(Y)

# 5.1.4
# Construct a 2D convolutional layer whose kernel array has shape (1, 2)
conv2d = Conv2D(kernel_size=(1, 2))

step = 20
lr = 0.01
for i in range(step):
    Y_hat = conv2d(X)
    l = ((Y_hat - Y)**2).sum()  # squared-error loss
    l.backward()

    # Gradient descent update
    conv2d.weight.data -= lr * conv2d.weight.grad
    conv2d.bias.data -= lr * conv2d.bias.grad

    # Zero the gradients so they do not accumulate across iterations
    conv2d.weight.grad.fill_(0)
    conv2d.bias.grad.fill_(0)
    if (i + 1) % 5 == 0:
        print('Step %d, loss %.3f' % (i + 1, l.item()))