Example #1
import torchvision
from torchvision import transforms
import SpykeTorch.utils as utils
import SpykeTorch.functional as sf


class S1C1Transform:
    # __init__ reconstructed from the attributes used in __call__ below;
    # the Intensity2Latency timestep count (15) is an assumed value.
    def __init__(self, filter, timesteps=15):
        self.to_tensor = transforms.ToTensor()
        self.filter = filter
        self.temporal_transform = utils.Intensity2Latency(timesteps)
        self.cnt = 0

    def __call__(self, image):
        # Log progress every 1000 processed images.
        if self.cnt % 1000 == 0:
            print(self.cnt)
        self.cnt += 1
        # Convert to a tensor scaled to [0, 255] and add a leading (minibatch) dimension.
        image = self.to_tensor(image) * 255
        image.unsqueeze_(0)
        # DoG filtering, local normalization, then intensity-to-latency coding;
        # sign() turns the cumulative spike wave into a binary spike-wave tensor.
        image = self.filter(image)
        image = sf.local_normalization(image, 8)
        temporal_image = self.temporal_transform(image)
        return temporal_image.sign()


# Six DoG kernels: ON- and OFF-center pairs at three spatial scales.
kernels = [
    utils.DoGKernel(3, 3 / 9, 6 / 9),
    utils.DoGKernel(3, 6 / 9, 3 / 9),
    utils.DoGKernel(7, 7 / 9, 14 / 9),
    utils.DoGKernel(7, 14 / 9, 7 / 9),
    utils.DoGKernel(13, 13 / 9, 26 / 9),
    utils.DoGKernel(13, 26 / 9, 13 / 9)
]
filter = utils.Filter(kernels, padding=6, thresholds=50)
s1c1 = S1C1Transform(filter)

data_root = "data"
MNIST_train = utils.CacheDataset(
    torchvision.datasets.MNIST(root=data_root,
                               train=True,
                               download=True,
                               transform=s1c1))
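
A minimal usage sketch (not part of the original snippet): the cached training set can be batched with a standard PyTorch DataLoader, as Example #2 below does; the batch size here is arbitrary.

from torch.utils.data import DataLoader

# Sketch: batch the cached spike-wave tensors for training.
MNIST_loader = DataLoader(MNIST_train, batch_size=1000, shuffle=True)
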
Example #2


def preprocess(x, xtest):
    # Zero-center each sample, then apply gcn (presumably global contrast
    # normalization) to both the training and test arrays.
    x = sample_zero_mean(x)
    x = gcn(x)
    xtest = sample_zero_mean(xtest)
    xtest = gcn(xtest)
    return x, xtest

if __name__ == "__main__":

    # kernels = [ utils.DoGKernel(3,1,2), utils.DoGKernel(3,2,1),
    #             utils.OnCenter(3), utils.OffCenter(3)]

    kernels = [utils.DoGKernel(3, 1, 2), utils.DoGKernel(3, 2, 1)]

    filter = utils.Filter(kernels, padding=6, thresholds=50)

    transform = InputTransform(filter)

    data_root = 'data/'

    MNIST_train = utils.CacheDataset(MNIST(root=data_root, train=True, download=True, transform=transform))  # 60000 x 30 x 30
    MNIST_test = utils.CacheDataset(MNIST(root=data_root, train=False, download=True, transform=transform))  # 10000 x 30 x 30

    MNIST_loader = DataLoader(MNIST_train, batch_size=1000, shuffle=True)
    MNIST_test_loader = DataLoader(MNIST_test, batch_size=1000, shuffle=False)
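
    # Hypothetical sanity check (not in the original snippet): pull one batch
    # from the loader; the exact shape depends on InputTransform and the
    # number of timesteps it uses.
    for spike_waves, targets in MNIST_loader:
        print(spike_waves.shape, targets.shape)  # e.g. (1000, T, 2, 30, 30) and (1000,)
        break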

Example #3
# (imports as in Example #1)
class S1Transform:
    # __init__ reconstructed from the attributes used in __call__ below;
    # the Intensity2Latency timestep count (15) is an assumed value.
    def __init__(self, filter, timesteps=15):
        self.to_tensor = transforms.ToTensor()
        self.filter = filter
        self.temporal_transform = utils.Intensity2Latency(timesteps)
        self.cnt = 0

    def __call__(self, image):
        if self.cnt % 1000 == 0:
            print(self.cnt)
        self.cnt += 1
        image = self.to_tensor(image) * 255
        image.unsqueeze_(0)
        image = self.filter(image)
        image = sf.local_normalization(image, 8)
        temporal_image = self.temporal_transform(image)
        return temporal_image.sign().byte()


kernels = [
    utils.DoGKernel(7, 1, 2),
    utils.DoGKernel(7, 2, 1),
]
filter = utils.Filter(kernels, padding=3, thresholds=50)
s1 = S1Transform(filter)

data_root = "data"
MNIST_train = utils.CacheDataset(
    torchvision.datasets.MNIST(root=data_root,
                               train=True,
                               download=True,
                               transform=s1))
MNIST_test = utils.CacheDataset(
    torchvision.datasets.MNIST(root=data_root,
                               train=False,
                               download=True,
                               transform=s1))
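
utils.CacheDataset is meant to store each transformed sample after its first access, so the S1 transform runs only once per image. A minimal check, assuming it follows the standard torch Dataset protocol (this check is not part of the original snippet):

sample, target = MNIST_train[0]   # first access: runs S1Transform and caches the result
cached, _ = MNIST_train[0]        # later accesses are served from the cache
print(sample.shape, target)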