# Example 1
#-----------------------------------------------------------------------------
# File Name : test_aedat_legacy_timesurface.py
# Author: Emre Neftci
#
# Creation Date : Tue 16 Mar 2021 01:28:22 PM PDT
# Last Modified :
#
# Copyright : (c) UC Regents, Emre Neftci
# Licence : GPLv3
#-----------------------------------------------------------------------------
from torchneuromorphic.utils import plot_frames_imshow, legacy_aedat_to_events
import torchneuromorphic.transforms as transforms
import pylab as plt
import numpy as np
import sys

device = 'cuda'

# Decode the legacy-format AEDAT recording whose path is given on the
# command line into an event stream.
events = legacy_aedat_to_events(sys.argv[1])

dt = 1000              # temporal bin width for the Downsample transform
size = [2, 346, 260]   # (polarity, x, y) frame dimensions for binning

# Pipeline: coarsen timestamps by dt -> bin events into 1000 count
# frames -> convert to a tensor -> exponential temporal filter on `device`.
pipeline = transforms.Compose([
    transforms.Downsample(factor=[dt, 1, 1, 1]),
    transforms.ToCountFrame(T=1000, size=size),
    transforms.ToTensor(),
    transforms.ExpFilterEvents(tau=100, length=500, device=device),
])
frames = pipeline(events)

# Move the filtered frames back to host memory and display them.
plot_frames_imshow(np.array([frames.detach().cpu().numpy()]), nim=1)
plt.show()
# Example 2
def create_dataloader(root='data/nmnist/n_mnist.hdf5',
                      batch_size=72,
                      chunk_size_train=300,
                      chunk_size_test=300,
                      dt=1000,
                      transform_train=None,
                      transform_test=None,
                      target_transform_train=None,
                      target_transform_test=None,
                      **dl_kwargs):
    """Build train/test DataLoaders over an N-MNIST HDF5 dataset.

    Both loaders are restricted to the first 100 sample keys of each of
    the 10 digit classes via ``SubsetRandomSampler``.

    Args:
        root: Path to the N-MNIST HDF5 file.
        batch_size: Samples per batch for both loaders.
        chunk_size_train: Number of time bins per training sample.
        chunk_size_test: Number of time bins per test sample.
        dt: Width of one time bin, in the events' timestamp units.
        transform_train: Optional event transform for the train split;
            when None, a default crop -> downsample -> count-frame ->
            tensor pipeline is used.
        transform_test: Same, for the test split.
        target_transform_train: Optional label transform for the train
            split; when None, labels are repeated once per time bin and
            one-hot encoded over 10 classes.
        target_transform_test: Same, for the test split.
        **dl_kwargs: Forwarded verbatim to ``torch.utils.data.DataLoader``.

    Returns:
        (train_dl, test_dl): The two DataLoader instances.
    """
    ds = [1, 1]          # spatial downsampling factors per axis
    low_crop = [0, 0]
    high_crop = [32, 32]
    # Frame shape after crop and downsample: (polarity, height, width).
    size = [
        2,
        np.ceil((high_crop[0] - low_crop[0]) / ds[0]).astype('int'),
        np.ceil((high_crop[1] - low_crop[1]) / ds[1]).astype('int')
    ]
    print(size)

    def default_transform(chunk_size):
        # Crop spatial dims, coarsen timestamps by dt, bin events into
        # `chunk_size` count frames, and convert to a tensor.
        return transforms.Compose([
            transforms.CropDims(low_crop, high_crop, [2, 3]),
            transforms.Downsample(factor=[dt, 1, ds[0], ds[1]]),
            transforms.ToCountFrame(T=chunk_size, size=size),
            transforms.ToTensor()
        ])

    if transform_train is None:
        transform_train = default_transform(chunk_size_train)
    if transform_test is None:
        transform_test = default_transform(chunk_size_test)

    if target_transform_train is None:
        target_transform_train = transforms.Compose(
            [transforms.Repeat(chunk_size_train),
             transforms.toOneHot(10)])
    if target_transform_test is None:
        target_transform_test = transforms.Compose(
            [transforms.Repeat(chunk_size_test),
             transforms.toOneHot(10)])

    train_d = NMNISTDataset(root,
                            train=True,
                            transform=transform_train,
                            target_transform=target_transform_train,
                            chunk_size=chunk_size_train)

    # First 100 keys of each class, flattened into one index list.
    train_subset_indices = train_d.keys_by_label[:, :100].reshape(-1)

    train_dl = torch.utils.data.DataLoader(
        train_d,
        batch_size=batch_size,
        sampler=SubsetRandomSampler(train_subset_indices),
        **dl_kwargs)

    test_d = NMNISTDataset(
        root,
        transform=transform_test,
        target_transform=target_transform_test,
        train=False,
        chunk_size=chunk_size_test,
    )

    # Test keys are global, so rebase them to start at the first test key.
    test_subset_indices = test_d.keys_by_label[:, :100].reshape(
        -1) - test_d.keys[0]

    test_dl = torch.utils.data.DataLoader(
        test_d,
        batch_size=batch_size,
        sampler=SubsetRandomSampler(test_subset_indices),
        **dl_kwargs)

    return train_dl, test_dl
def create_attn_dataloader(root='data/dvsgesture/dvs_gestures_build19.hdf5',
                           batch_size=72,
                           chunk_size_train=500,
                           chunk_size_test=1800,
                           dt=1000,
                           transform_train=None,
                           transform_test=None,
                           target_transform_train=None,
                           target_transform_test=None,
                           **dl_kwargs):
    """Build train/test DataLoaders over a DVS-Gestures HDF5 dataset,
    using an attention-based event transform.

    Args:
        root: Path to the DVS-Gestures HDF5 file.
        batch_size: Samples per batch for both loaders.
        chunk_size_train: Number of time bins per training sample.
        chunk_size_test: Number of time bins per test sample.
        dt: Width of one time bin, in the events' timestamp units.
        transform_train: Optional event transform for the train split;
            when None, a default downsample -> attention -> spatial
            downsample -> count-frame -> tensor pipeline is used.
        transform_test: Same, for the test split.
        target_transform_train: Optional label transform for the train
            split; when None, labels are repeated once per time bin and
            one-hot encoded over 11 gesture classes.
        target_transform_test: Same, for the test split.
        **dl_kwargs: Forwarded verbatim to ``torch.utils.data.DataLoader``.

    Returns:
        (train_dl, test_dl): The two DataLoader instances.
    """
    n_events_attention = 100   # events per attention window

    # Attention operates on (polarity, 64, 64); the subsequent 4x4
    # spatial downsample yields (polarity, 16, 16) count frames.
    size = [2, 64, 64]

    def default_transform(chunk_size):
        return transforms.Compose([
            transforms.Downsample(factor=[dt, 1, 1, 1]),
            transforms.Attention(n_events_attention, size=size),
            transforms.Downsample(factor=[1, 1, 4, 4]),
            transforms.ToCountFrame(T=chunk_size, size=[2, 16, 16]),
            transforms.ToTensor()
        ])

    if transform_train is None:
        transform_train = default_transform(chunk_size_train)
    if transform_test is None:
        transform_test = default_transform(chunk_size_test)

    if target_transform_train is None:
        target_transform_train = transforms.Compose(
            [transforms.Repeat(chunk_size_train),
             transforms.toOneHot(11)])
    if target_transform_test is None:
        target_transform_test = transforms.Compose(
            [transforms.Repeat(chunk_size_test),
             transforms.toOneHot(11)])

    train_d = DVSGestureDataset(root,
                                train=True,
                                transform=transform_train,
                                target_transform=target_transform_train,
                                chunk_size=chunk_size_train)

    train_dl = torch.utils.data.DataLoader(train_d,
                                           batch_size=batch_size,
                                           shuffle=True,
                                           **dl_kwargs)

    test_d = DVSGestureDataset(root,
                               transform=transform_test,
                               target_transform=target_transform_test,
                               train=False,
                               chunk_size=chunk_size_test)

    test_dl = torch.utils.data.DataLoader(test_d,
                                          batch_size=batch_size,
                                          **dl_kwargs)

    return train_dl, test_dl