Example #1
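    # Constructor of a residual basic block (the full class appears in Example #7):
    # two conv + batch-norm LayerBlocks, with the second ReLU kept separate so it
    # can be applied after the shortcut is added.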
    def __init__(self, in_planes, planes, stride=1):
        super().__init__()

        self.in_planes = in_planes
        self.planes = planes
        self.stride = stride

        conv1 = torch.nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        bn1 = torch.nn.BatchNorm2d(planes)
        relu1 = torch.nn.ReLU()
        conv2 = torch.nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        bn2 = torch.nn.BatchNorm2d(planes)
        relu2 = torch.nn.ReLU()

        self.block1 = LayerBlock([conv1, bn1], [relu1])
        self.block2 = LayerBlock([conv2, bn2], [])
        self.act3 = make_activationop(relu2)

        self.As = None
Example #2
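    # Constructor of a CIFAR-style ResNet (10 classes, 32x32 inputs): a stem block,
    # three stages of residual blocks built by _make_layer, and a
    # pool/flatten/linear head, all wrapped in a single SequentialBlockModel.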
    def __init__(self, num_blocks, num_classes=10):
        super().__init__()
        self.in_planes = 16

        conv1 = torch.nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
        bn1 = torch.nn.BatchNorm2d(16)
        relu1 = torch.nn.ReLU()
        avgpool9 = torch.nn.AvgPool2d(8)
        flatten9 = FlattenOpL()
        linear9 = torch.nn.Linear(64, num_classes)

        block1 = LayerBlock([conv1, bn1], [relu1])
        layerlist1 = self._make_layer(16, num_blocks[0], stride=1)
        layerlist2 = self._make_layer(32, num_blocks[1], stride=2)
        layerlist3 = self._make_layer(64, num_blocks[2], stride=2)
        block9 = LayerBlock([avgpool9, flatten9, linear9], [])

        self.seqmodel = SequentialBlockModel([block1] + layerlist1 + layerlist2 + layerlist3 + [block9])
        # Apply the weight init only after self.seqmodel registers the blocks as
        # submodules; otherwise self.apply never reaches them.
        self.apply(_weights_init)
Example #3
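    # LeNet-style convolutional network expressed as five LayerBlocks chained
    # by a SequentialBlockModel.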
    def __init__(self):
        super().__init__()
        self.conv1 = torch.nn.Conv2d(in_channels=1,
                                     out_channels=6,
                                     kernel_size=5)
        self.conv2 = torch.nn.Conv2d(in_channels=6,
                                     out_channels=16,
                                     kernel_size=5)
        self.fc1 = torch.nn.Linear(16 * 5 * 5, 120)
        self.fc2 = torch.nn.Linear(120, 84)
        self.fc3 = torch.nn.Linear(84, 10)
        self.relu = torch.nn.ReLU()
        self.avgpool = torch.nn.AvgPool2d(2)

        self.flatten = FlattenOpL()
        self.block1 = LayerBlock([self.conv1], [self.relu])
        self.block2 = LayerBlock([self.avgpool, self.conv2], [self.relu])
        self.block3 = LayerBlock([self.avgpool, self.flatten, self.fc1],
                                 [self.relu])
        self.block4 = LayerBlock([self.fc2], [self.relu])
        self.block5 = LayerBlock([self.fc3], [])
        self.model = SequentialBlockModel(
            [self.block1, self.block2, self.block3, self.block4, self.block5])
Example #4
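# Checks that LayerBlock's implicit_forward matches the regular forward pass
# for a conv block, first with ReLU and then with MaxPool as the activation.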
import torch
import numpy as np
from implicit.Block import LayerBlock

## TODO: To be implemented

lin = torch.nn.Conv2d(in_channels=1,
                      out_channels=6,
                      kernel_size=5,
                      stride=1,
                      padding=1)
act = torch.nn.ReLU()
act1 = torch.nn.MaxPool2d(kernel_size=2, stride=2)
input = torch.randn((10, 1, 30, 30))  # NCHW: Conv2d expects the channel dim second

# simple test
block = LayerBlock([lin], [act])

tensor_out = block(input)
i = block.implicit_forward(input)
t = tensor_out.detach().cpu().numpy().reshape(10, -1)
assert np.isclose(np.abs(i - t).max(), 0, atol=1e-5)

# MaxPool Test
block = LayerBlock([lin], [act1])

tensor_out = block(input)
i = block.implicit_forward(input)
t = tensor_out.detach().cpu().numpy().reshape(10, -1)
t = t[:, block.out_size()]
assert np.isclose(np.abs(i - t).max(), 0, atol=1e-5)
Example #5
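# Checks implicit_forward against the regular forward pass for fully
# connected LayerBlocks: first a single linear layer, then a stack of three.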
import torch
import numpy as np
from implicit.Block import LayerBlock
from implicit.Model import SequentialBlockModel

lin = torch.nn.Linear(100, 20)
lin0 = torch.nn.Linear(100, 100)
lin1 = torch.nn.Linear(20, 20)
act = torch.nn.ReLU()
input = torch.randn((10, 100))

# simple test
block = LayerBlock([lin], [act])

tensor_out = block(input)
i = block.implicit_forward(input)
t = tensor_out.detach().cpu().numpy()
assert np.isclose(np.abs(i - t).max(), 0, atol=1e-5)

# stacked test: three linear layers with ReLUs between them
block = LayerBlock([lin0, lin, lin1], [act, act])

tensor_out = block(input)
i = block.implicit_forward(input)
t = tensor_out.detach().cpu().numpy()
assert np.isclose(np.abs(i - t).max(), 0, atol=1e-5)
Example #6
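# Builds a five-block MLP, extracts its implicit-model matrices (A, B, C, D)
# and activation map phi, and checks the implicit evaluation against the
# regular forward pass.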
import torch
import numpy as np
from implicit.Block import LayerBlock
from implicit.Model import SequentialBlockModel
import scipy.sparse as sp

lin = torch.nn.Linear(784, 1024)
lin0 = torch.nn.Linear(1024, 1024)
lin1 = torch.nn.Linear(1024, 1024)
lin2 = torch.nn.Linear(1024, 300)
lin3 = torch.nn.Linear(300, 10)
act = torch.nn.ReLU()
input = torch.randn((10, 784))

# simple test
block = LayerBlock([lin], [act])
block0 = LayerBlock([lin0], [act])
block1 = LayerBlock([lin1], [act])
block2 = LayerBlock([lin2], [act])
block3 = LayerBlock([lin3], [])
model = SequentialBlockModel([block, block0, block1, block2, block3])

tensor_out = model(input)
A, B, C, D, phi = model.getImplicitModel(input[0:1, :])
i = SequentialBlockModel.implicit_forward(A, B, C, D, phi, input)
t = tensor_out.detach().cpu().numpy()
assert np.isclose(np.abs(i - t).max(), 0, atol=1e-5)

Example #7
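# Residual basic block with the machinery to export itself as an implicit
# model: Ab() assembles the sparse linear map (including the shortcut), and
# phi() assembles the matching activation operator.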
class ResBasicBlock(BasicBlock):
    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super().__init__()

        self.in_planes = in_planes
        self.planes = planes
        self.stride = stride

        conv1 = torch.nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        bn1 = torch.nn.BatchNorm2d(planes)
        relu1 = torch.nn.ReLU()
        conv2 = torch.nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        bn2 = torch.nn.BatchNorm2d(planes)
        relu2 = torch.nn.ReLU()

        self.block1 = LayerBlock([conv1, bn1], [relu1])
        self.block2 = LayerBlock([conv2, bn2], [])
        self.act3 = make_activationop(relu2)

        self.As = None

    def forward(self, x):
        out = self.block1(x)
        out = self.block2(out)
        out += self.shortcut(x)
        out = self.act3(out)
        return out

    def shortcut(self, x):
        if self.stride != 1 or self.in_planes != self.planes:
            return torch.nn.functional.pad(x[:, :, ::2, ::2], (0, 0, 0, 0, self.planes//4, self.planes//4), "constant", 0)
        else:
            return x

    def Ab(self):
        A1, b1 = self.block1.Ab()
        A2, b2 = self.block2.Ab()
        As = None
        if self.stride != 1 or self.in_planes != self.planes:
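            # The strided/padded shortcut is linear, so its matrix can be
            # recovered by tracing integer input indices through it: feed in
            # 1..in_size, see where each index lands in the output, and record
            # those (row, col) pairs as a sparse selection matrix.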
            in_shape = self.block1.layerops[0].in_shape
            in_shape[0] = 1
            in_size = np.prod(in_shape)
            out_shape = self.block1.layerops[-1].out_shape
            out_shape[0] = 1
            out_size = np.prod(out_shape)

            # input indices
            x = torch.from_numpy(np.arange(1,in_size+1).reshape(in_shape))
            xx = self.shortcut(x).detach().cpu().numpy()

            assert np.all(xx.shape == out_shape)

            # output indices
            iout = np.arange(out_size).reshape(out_shape)
            nz = xx.nonzero()
            xnz = iout[nz]
            ynz = xx[nz] - 1
            v = np.ones(len(nz[0]))
            idx = (xnz, ynz)
            As = sp.coo_matrix((v, idx), shape=(self.block2.out_size(), self.block1.in_size()))
        else:
            As = sp.eye(self.block1.in_size())
        #A = sp.bmat([[sp.eye(self.out_size()), None, As], [None, A2, None], [None, None, A1]])
        #b = np.vstack((np.zeros((self.out_size(),1)), b2, b1))
        A = sp.bmat([[A2, As], [None, A1]])
        b = np.vstack((b2, b1))

        # Cache the shortcut matrix; implicit_forward below depends on self.As.
        self.As = As
        return A, b

    def phi(self):
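        # Activation operator for the whole block: act3 (the post-shortcut
        # ReLU) covers block2's output slice, phi1 covers block1's; block2
        # itself carries no activation.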
        phi1 = self.block1.phi()
        #phi2 = self.block2.phi()
        phi3 = self.act3.phi()
        # make phi
        #start = self.out_size()
        #indices = [slice(0, start, None)]
        start = 0
        indices = []
        for block in [self.block2, self.block1]:
            indices.append(slice(start, start + block.out_size()))
            start += block.out_size()
        phi = Phi.concatenate([phi3, phi1], indices)
        return phi

    def in_size(self):
        return self.block1.in_size()

    def out_size(self):
        return self.block2.out_size()

    def implicit_forward(self, input):
        # NOT FOR USE!
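        # Debug helper: computes the block output explicitly (two LayerBlocks
        # plus the shortcut term As @ u) and cross-checks it against a direct
        # evaluation of the assembled implicit system phi(A u + b).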
        A, b = self.Ab()
        phi = self.phi()

        m = input.shape[0]
        if isinstance(input, torch.Tensor):
            u = input.clone().detach().cpu().numpy().reshape(m, -1).T
        else:
            u = input.reshape(m, -1).T

        outb1 = self.block1.implicit_forward(input).T

        outb2 = self.block2.implicit_forward(outb1.T).T

        out = outb2.T + (self.As @ u).T

        u2 = sp.bmat([[sp.coo_matrix((self.block2.in_size(), m))], [u]]).toarray()
        out2 = phi(A@u2+b)
        out2b1 = out2[-self.block2.in_size():, :]
        u2[:self.block2.in_size(), :] = out2b1
        out2 = phi(A@u2+b)
        out2b2 = out2[:self.block2.out_size(),:]
        out2 = out2b2.T

        assert np.isclose(np.abs(out - out2).max(), 0, atol=1e-4)
        return out
Example #8
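# End-to-end check on ResNet pieces: a stem LayerBlock and ResBasicBlocks in
# eval mode (so BatchNorm uses fixed statistics and acts affinely), each
# verified against its implicit_forward.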
import torch
import numpy as np
import scipy.sparse as sp
from implicit.Block import LayerBlock
from implicit.Model import SequentialBlockModel
from implicit.ResNetModel import ResNetModel, ResBasicBlock

input = torch.randn((10, 784 * 3))
input = input.reshape((10, 3, 28, 28))
input = torch.nn.functional.pad(input, (2, 2, 2, 2), 'constant', 0)

conv1 = torch.nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
bn1 = torch.nn.BatchNorm2d(16)
relu1 = torch.nn.ReLU()
block1 = LayerBlock([conv1, bn1], [relu1])

block1.eval()
tensor_out = block1(input)
i = block1.implicit_forward(input)
t = tensor_out.detach().cpu().numpy().reshape([10, -1])
assert np.isclose(np.abs(i - t).max(), 0, atol=5e-5)

block2 = ResBasicBlock(16, 32, 2)
block2.eval()
input = tensor_out
tensor_out = block2(input)
i = block2.implicit_forward(input)
t = tensor_out.detach().cpu().numpy().reshape([10, -1])
assert np.isclose(np.abs(i - t).max(), 0, atol=5e-5)

block3 = ResBasicBlock(32, 64, 2)