Example #1
def nf_grad_Test(x, y, z):
    x = Tensor(x, requires_grad=True)
    y = Tensor(y, requires_grad=True)
    z = Tensor(z, requires_grad=True)
    t1 = time()
    f9 = func(x, y, z)
    t2 = time() - t1              # forward-pass wall-clock time
    t1 = time()
    f9.backward()
    print("nf", t2, time() - t1)  # forward time, backward time
    return [x.grad, y.grad, z.grad]
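A minimal driver sketch: `Tensor` comes from the framework under test, the helper needs `from time import time`, and the `func` body below is hypothetical (any composition of Tensor ops works):

import numpy as np
from time import time

def func(x, y, z):          # hypothetical function under test
    return (x * y + z).sum()

grads = nf_grad_Test(np.random.rand(4, 4),
                     np.random.rand(4, 4),
                     np.random.rand(4, 4))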
Example #2
def conv2d(a,
           weight,
           bias=None,
           padding='valid',
           stride=(1, 1),
           dilation=(1, 1),
           groups=1):
    """
    NCHW
    :param a: NCHW
    :param weight: OIHW
    :param bias:
    :param padding:
    :param stride: sH, sW
    :param dilation: dH, dW
    :param groups:
    :return:
    """
    assert padding in ('valid', 'same', 'full') or isinstance(
        padding, (tuple, list))
    if isinstance(stride, int):
        stride = (stride, stride)
    assert isinstance(stride, (tuple, list))
    if isinstance(dilation, int):
        dilation = (dilation, dilation)
    assert isinstance(dilation, (tuple, list))
    bs, xch, xh, xw = a.shape
    zch, _, k0, k1 = weight.shape
    if isinstance(padding, (tuple, list)):
        a = Tensor._op(Pad, a, op_args=(padding, 'zeros'))
    if padding == 'same':
        zshape = np.ceil([xh / stride[0], xw / stride[1]]).astype(int)
        if stride[0] < k0:
            ph = (k0 - 1) * dilation[0] + zshape[0] * stride[0] - xh
        else:
            ph = zshape[0] * stride[0] - xh
        if stride[1] < k1:
            pw = (k1 - 1) * dilation[1] + zshape[1] * stride[1] - xw
        else:
            pw = zshape[1] * stride[1] - xw
        padding = (ph // 2, (ph + 1) // 2, pw // 2, (pw + 1) // 2)
        a = Tensor._op(Pad, a, op_args=(padding, 'zeros'))
    out = Tensor._op(Conv2d, a, weight, op_args=(stride, dilation))
    if bias is not None:
        out = out + bias
    return out
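A usage sketch under the layouts the docstring names (the shapes are illustrative; with stride 1 and 'same' padding the spatial size is preserved):

import numpy as np

a = Tensor(np.random.rand(2, 3, 8, 8).astype(np.float32))   # NCHW input
w = Tensor(np.random.rand(4, 3, 3, 3).astype(np.float32),   # OIHW filters
           requires_grad=True)
out = conv2d(a, w, stride=1, padding='same')
print(out.shape)   # expected: (2, 4, 8, 8)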
Example #3
        def forward(self, x):
            if isinstance(x, np.ndarray):
                x = Tensor(x)

            x = nmF.relu(self.fc1(x))
            # x = nmF.relu(self.fc2(x))
            # x = nmF.softmax(self.fc3(x), 1)
            return x
Example #4
        def forward(self, x):
            if isinstance(x, np.ndarray):
                x = Tensor(x)

            x = nmF.relu(self.b1(self.c1(x)))
            x = nmF.relu(self.b2(self.c2(x)))
            x = nmF.relu(self.b3(self.c3(x)))
            return x
Example #5
    def forward(self, x):
        if isinstance(x, np.ndarray):
            x = Tensor(x)
        x = self.c1(x)                  # first conv (no activation before pooling here)
        x = F.max_pool2d(x)
        x = F.relu(self.c2(x))
        x = F.max_pool2d(x)
        x = F.relu(self.c3(x))
        x = F.max_pool2d(x)
        x = x.reshape([-1, 4 * 3 * 3])  # flatten to (batch, 4*3*3)
        x = F.relu(self.fc1(x))
        x = F.softmax(self.fc2(x), -1)
        return x
Example #6
def nf_layer(z):
    z = Tensor(z, requires_grad=True)
    net = netBuild(nm)
    r = net(z)             # note: immediately overwritten below
    f1 = nm.Linear(2, 3)
    r = f1(z)              # only this fresh Linear layer is actually exercised
    r.backward()
    return [r.data, z.grad]
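Assuming `nm.Linear` follows the torch convention r = z @ W.T + b with weight shape (out_features, in_features), and that `backward()` on a non-scalar seeds a gradient of ones (as the comparison tests elsewhere suggest), every row of `z.grad` should equal the column sums of the weight. A pure-NumPy check of that identity:

import numpy as np

W = np.random.rand(3, 2).astype(np.float32)        # (out_features, in_features)
dz = np.ones((5, 3), dtype=np.float32) @ W         # ones-seeded backward through z @ W.T
print(np.allclose(dz, np.tile(W.sum(axis=0), (5, 1))))   # True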
Example #7
def max_pool2d(a, pool_size=(2, 2), stride=(2, 2), padding='valid'):
    """
    :param a:
    :param pool_size:
    :param stride:
    :param padding:
    :return:
    """
    assert padding in ('valid', 'same', 'full') or isinstance(
        padding, (tuple, list))
    if isinstance(stride, int):
        stride = (stride, stride)
    assert isinstance(stride, (tuple, list))
    if isinstance(pool_size, int):
        pool_size = (pool_size, pool_size)
    assert isinstance(pool_size, (tuple, list))
    bs, xch, xh, xw = a.shape
    if isinstance(padding, (tuple, list)):
        a = Tensor._op(Pad, a, op_args=(padding, 'zeros'))
    # 'same'/'full' padding is not implemented here; see conv2d for the
    # equivalent 'same' logic.
    out = Tensor._op(MaxPool2d, a, op_args=(pool_size, stride))
    return out
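A usage sketch, assuming `Tensor` wraps NumPy arrays as in the other examples:

import numpy as np

a = Tensor(np.random.rand(2, 3, 8, 8).astype(np.float32))  # NCHW
out = max_pool2d(a, pool_size=2, stride=2)
print(out.shape)   # expected: (2, 3, 4, 4), each 2x2 window reduced to its max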
Example #8
    def _load_from_state_dict(self, state_dict, prefix, missing_keys,
                              unexpected_keys, error_msgs):
        local_name_params = itertools.chain(self._parameters.items(),
                                            self._buffers.items())
        local_state = {k: v for k, v in local_name_params if v is not None}

        for name, param in local_state.items():
            key = prefix + name
            if key in state_dict:
                input_param = state_dict[key]

                # Backward compatibility: loading 1-dim tensor from 0.3.* to version 0.4+
                if len(param.shape) == 0 and len(input_param.shape) == 1:
                    input_param = input_param[0]

                if input_param.shape != param.shape:
                    if input_param.size == param.size:
                        input_param = input_param.reshape(param.shape)
                    else:
                        # local shape should match the one in checkpoint
                        error_msgs.append(
                            'size mismatch for {}: copying a param with shape {} from checkpoint, '
                            'the shape in current model is {}.'.format(
                                key, input_param.shape, param.shape))
                        continue

                if isinstance(input_param, np.ndarray):
                    # backwards compatibility for serialized parameters
                    input_param = Tensor(input_param)
                try:
                    param.copy_(input_param)
                except Exception:
                    error_msgs.append(
                        'While copying the parameter named "{}", whose '
                        'dimensions in the model are {} and whose dimensions '
                        'in the checkpoint are {}, an exception occurred.'.format(
                            key, param.shape, input_param.shape))
            else:
                missing_keys.append(key)

        for key in state_dict.keys():
            if key.startswith(prefix):
                input_name = key[len(prefix):]
                input_name = input_name.split(
                    '.', 1)[0]  # get the name of param/buffer/child
                if input_name not in self._modules and input_name not in local_state:
                    unexpected_keys.append(key)
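The shape-compatibility branch above is plain NumPy logic; a self-contained illustration of the same rule (reshape when the element counts agree, otherwise report a mismatch):

import numpy as np

def check_shape(input_param, target_shape):
    target = np.zeros(target_shape)
    if input_param.shape != target.shape:
        if input_param.size == target.size:
            return input_param.reshape(target.shape)   # e.g. (6,) -> (2, 3)
        raise ValueError('size mismatch: {} vs {}'.format(
            input_param.shape, target.shape))
    return input_param

print(check_shape(np.arange(6), (2, 3)).shape)   # (2, 3)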
Example #9
def test_Linear():
    class ThModel(nn.Module):
        def __init__(self):
            super(ThModel, self).__init__()

            self.fc1 = nn.Linear(3, 2)
            self.fc2 = nn.Linear(2, 4)
            self.fc3 = nn.Linear(4, 5)

        def forward(self, x):
            if isinstance(x, np.ndarray):
                x = torch.from_numpy(x)
            x = nnF.relu(self.fc1(x))
            # x = nnF.relu(self.fc2(x))
            # x = nnF.softmax(self.fc3(x), 1)
            return x

    class NfModel(nm.Module):
        def __init__(self):
            super(NfModel, self).__init__()

            self.fc1 = nm.Linear(3, 2)
            self.fc2 = nm.Linear(2, 4)
            self.fc3 = nm.Linear(4, 5)

        def forward(self, x):
            if isinstance(x, np.ndarray):
                x = Tensor(x)

            x = nmF.relu(self.fc1(x))
            # x = nmF.relu(self.fc2(x))
            # x = nmF.softmax(self.fc3(x), 1)
            return x

    z = np.random.random([5, 3]).astype(np.float32) * 20

    thnet = ThModel()
    thp = thnet.state_dict()
    for k in thp.keys():
        thp[k] = Tensor(thp[k].numpy())

    nfnet = NfModel()
    nfnet.load_state_dict(thp)
    thopt = optim.SGD(thnet.parameters(), lr=1e-3, momentum=0.4, nesterov=True)
    nfopt = SGD(nfnet.parameters(), lr=1e-3, momentum=0.4, nesterov=True)

    circle = 800

    for i in range(circle):
        thr = thnet(torch.from_numpy(z))
        loss = (3. - thr)
        thopt.zero_grad()
        loss.backward(torch.ones_like(loss))
        thopt.step()

    for i in range(circle):
        nfr = nfnet(z)
        loss = (3. - nfr)
        nfopt.zero_grad()
        loss.backward()
        nfopt.step()

    thr = thnet(z).detach().numpy()
    nfr = nfnet(z).numpy()
    print(thr)
    print(nfr)
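As in test_Conv (Example #15), the eyeball comparison can be made numerical; a minimal addition at the end of the test (the tolerance is a guess, since 800 momentum-SGD steps accumulate floating-point error):

    print(np.allclose(thr, nfr, atol=1e-4))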
Example #10
    # Optional pre-training accuracy check (translated from the original):
    # gt = Tensor(y_test[0:BATCH_SIZE])
    # nfr = np.argmax(nfnet(batch).numpy(), axis=-1)
    # gt = np.argmax(gt.numpy(), axis=-1)
    # print("initial accuracy", (nfr == gt).sum() / nfr.shape[0])
    epch = 3
    circle = 500
    for j in range(epch):
        t1 = time()
        for i in range(circle):
            t2 = time()
            batch = Tensor(x_train[i * BATCH_SIZE:(i + 1) * BATCH_SIZE],
                           requires_grad=True)
            gt = Tensor(y_train[i * BATCH_SIZE:(i + 1) * BATCH_SIZE])
            nfr = nfnet(batch)
            loss = categorical_crossentropy(nfr, gt)
            nfopt.zero_grad()
            loss.backward()
            nfopt.step()
            print(i, time() - t2)      # per-step wall-clock time
        # Evaluate on the test set after each epoch.
        batch = Tensor(x_test)
        gt = Tensor(y_test)
        nfr = np.argmax(nfnet(batch).numpy(), axis=-1)
        gt = np.argmax(gt.numpy(), axis=-1)
Example #11
def sigmoid(a):
    return Tensor._op(Sigmoid, a)
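`Sigmoid` is presumably the standard logistic function sigma(x) = 1 / (1 + exp(-x)), whose derivative sigma(x) * (1 - sigma(x)) is what the backward op must produce. A framework-independent NumPy check of that identity via central differences:

import numpy as np

def sigmoid_ref(x):
    return 1.0 / (1.0 + np.exp(-x))

x = np.linspace(-4, 4, 9)
s = sigmoid_ref(x)
analytic = s * (1.0 - s)                                  # closed-form derivative
eps = 1e-6
numeric = (sigmoid_ref(x + eps) - sigmoid_ref(x - eps)) / (2 * eps)
print(np.allclose(analytic, numeric, atol=1e-8))          # True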
Example #12
    x_train = x_train.astype(float)
    x_test = x_test.astype(float)
    x_train = x_train / x_train.max()
    x_test  = x_test / x_test.max()
    y_train = np.eye(10)[y_train]
    y_test = np.eye(10)[y_test]
    _, input_size = x_train.shape
    _, output_size = y_train.shape
    print(x_train.shape, y_train.shape)
    print(x_test.shape, y_test.shape)
    print(x_train.max(), x_test.max())

    nfnet = NfModel()
    nfopt = SGD(nfnet.parameters(), lr=1e-4, momentum=0.9, nesterov=True)

    batch = Tensor(x_test)
    gt = Tensor(y_test)
    nfr = nfnet(batch).numpy()
    nfr = np.argmax(nfr, axis=-1)
    gt = np.argmax(gt.numpy(), axis=-1)
    # print(nfr)
    # print(gt)
    acc = (nfr == gt).sum()
    print(acc / nfr.shape[0])   # accuracy of the untrained model
    epch = 30
    circle = 999
    for j in range(epch):
        # print()
        t1 = time()
        for i in range(circle):
            batch = Tensor(x_train[i*BATCH_SIZE:(i+1)*BATCH_SIZE])
Example #13
def pad(a, expanded_padding, mode='zeros'):
    return Tensor._op(Pad, a, op_args=(expanded_padding, mode))
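Judging by how `conv2d` builds its tuple above, `expanded_padding` puts the height pads first: (top, bottom, left, right). Note this differs from `torch.nn.functional.pad`, whose expanded order is (left, right, top, bottom). A NumPy sketch of the assumed 'zeros' semantics:

import numpy as np

def pad_ref(a, expanded_padding):
    top, bottom, left, right = expanded_padding   # assumed order, per conv2d above
    return np.pad(a, ((0, 0), (0, 0), (top, bottom), (left, right)))

x = np.ones((1, 1, 2, 2))
print(pad_ref(x, (1, 1, 2, 2)).shape)   # (1, 1, 4, 6)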
Example #14
def softmax(a, dim=None):
    if dim is None:
        dim = -1  # default to the last axis
    return Tensor._op(Softmax, a, op_args=(dim,))
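A NumPy reference for the same operation, with the usual max-subtraction for numerical stability (whether the framework's `Softmax` op does this internally is not shown here):

import numpy as np

def softmax_ref(a, dim=-1):
    shifted = a - a.max(axis=dim, keepdims=True)   # guard against exp overflow
    e = np.exp(shifted)
    return e / e.sum(axis=dim, keepdims=True)

x = np.array([[1.0, 2.0, 3.0]])
print(softmax_ref(x).sum())   # 1.0: each row sums to one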
Example #15
def test_Conv():
    class ThModel(nn.Module):
        def __init__(self):
            super(ThModel, self).__init__()

            self.c1 = nn.Conv2d(1, 3, 3, stride=2)
            self.c2 = nn.Conv2d(3, 9, (3, 5), stride=(2, 1), padding=(4, 2))
            self.c3 = nn.Conv2d(9, 1, (3, 5), stride=(2, 1), padding=(4, 2))
            self.b1 = nn.BatchNorm2d(3)
            self.b2 = nn.BatchNorm2d(9)
            self.b3 = nn.BatchNorm2d(1)

        def forward(self, x):
            if isinstance(x, np.ndarray):
                x = torch.from_numpy(x)

            x = nnF.relu(self.b1(self.c1(x)))
            x = nnF.relu(self.b2(self.c2(x)))
            x = nnF.relu(self.b3(self.c3(x)))
            return x

    class NfModel(nm.Module):
        def __init__(self):
            super(NfModel, self).__init__()

            self.c1 = nm.Conv2d(1, 3, 3, stride=2)
            self.c2 = nm.Conv2d(3,
                                9, (3, 5),
                                stride=(2, 1),
                                padding=(4, 4, 2, 2))
            self.c3 = nm.Conv2d(9,
                                1, (3, 5),
                                stride=(2, 1),
                                padding=(4, 4, 2, 2))
            self.b1 = nm.BatchNorm2d(3)
            self.b2 = nm.BatchNorm2d(9)
            self.b3 = nm.BatchNorm2d(1)

        def forward(self, x):
            if isinstance(x, np.ndarray):
                x = Tensor(x)

            x = nmF.relu(self.b1(self.c1(x)))
            x = nmF.relu(self.b2(self.c2(x)))
            x = nmF.relu(self.b3(self.c3(x)))
            return x

    z = np.random.random([4, 1, 7, 7]).astype(np.float32)

    thnet = ThModel()
    thp = thnet.state_dict()
    for k in thp.keys():
        thp[k] = Tensor(thp[k].numpy())

    nfnet = NfModel()
    nfnet.load_state_dict(thp)
    thopt = optim.SGD(thnet.parameters(), lr=1e-3, momentum=0.4, nesterov=True)
    nfopt = SGD(nfnet.parameters(), lr=1e-3, momentum=0.4, nesterov=True)

    circle = 10

    for i in range(circle):
        thr = thnet(z)
        loss = (3. - thr)
        thopt.zero_grad()
        loss.backward(torch.ones_like(loss))
        thopt.step()

    for i in range(circle):
        nfr = nfnet(z)
        loss = (3. - nfr)
        nfopt.zero_grad()
        loss.backward()
        nfopt.step()

    thr = thnet(z).detach().numpy()
    nfr = nfnet(z).numpy()
    print(thr)
    print(nfr)
    # np.allclose raises if the two shapes are not broadcastable.
    try:
        print(np.allclose(thr, nfr))
    except Exception:
        print("outputs not comparable")
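The paired models make the padding conventions explicit: torch's nn.Conv2d takes a symmetric per-axis pair, while the nm layer takes the expanded four-tuple with height first (the order is inferred from conv2d and pad above):

# torch: nn.Conv2d(..., padding=(padH, padW))               # symmetric per axis
# nm:    nm.Conv2d(..., padding=(padH, padH, padW, padW))   # (top, bottom, left, right)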
Example #16
    r.backward()
    return [r.data, z.grad]



if __name__ == '__main__':
    # setup_seed(20)
    z = np.random.random([5, 3]).astype(np.float32) * 20

    thnet = ThModel()
    thp = thnet.state_dict()
    for k in thp.keys():
        thp[k] = Tensor(thp[k].numpy())

    nfnet = NfModel()
    nfnet.load_state_dict(thp)
    thopt = optim.SGD(thnet.parameters(), lr=1e-3, momentum=0.4, nesterov=True)
    nfopt = SGD(nfnet.parameters(), lr=1e-3, momentum=0.4, nesterov=True)

    circle = 800

    for i in range(circle):
        thr = thnet(torch.from_numpy(z))
        loss = (3. - thr)
        thopt.zero_grad()
        loss.backward(torch.ones_like(loss))
        thopt.step()
Example #17
def relu(a, inplace=False):
    # `inplace` is accepted for torch.nn.functional API parity but ignored.
    return Tensor._op(ReLU, a)
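`relu` is max(0, x) elementwise; its subgradient is 1 where x > 0 and 0 elsewhere, which is what the `ReLU` op's backward must produce. A NumPy reference:

import numpy as np

x = np.array([-2.0, -0.5, 0.0, 0.5, 2.0])
y = np.maximum(0.0, x)            # forward
dy = (x > 0).astype(x.dtype)      # backward mask: 1 where x > 0, else 0
print(y, dy)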