Example 1
batch_size = args.batch_size
n_classes = 10
input_channels = 1
seq_length = int(784 / input_channels)
epochs = args.epochs
steps = 0

print(args)
train_loader, test_loader = data_generator(root, batch_size)

permute = torch.Tensor(np.random.permutation(784).astype(np.float64)).long()
channel_sizes = [args.nhid] * args.levels
kernel_size = args.ksize
model = TCN(input_channels,
            n_classes,
            channel_sizes,
            kernel_size=kernel_size,
            dropout=args.dropout)

if args.cuda:
    model.cuda()
    permute = permute.cuda()

lr = args.lr
optimizer = getattr(optim, args.optim)(model.parameters(), lr=lr)


def train(ep):
    global steps
    train_loss = 0
    model.train()
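
The excerpt stops right at the start of the epoch loop. Below is a minimal sketch of how the rest of train() typically looks for this setup; it is an assumption, not the original code: it reuses args, permute, model, optimizer and the other objects defined above, assumes the model ends in log_softmax (so F.nll_loss applies), and applies the fixed pixel permutation unconditionally.

import torch.nn.functional as F

def train(ep):
    global steps
    train_loss = 0
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        # flatten each 28x28 image into a 1-channel sequence of 784 pixels
        data = data.view(-1, input_channels, seq_length)
        # reorder the pixels with the fixed random permutation built above
        data = data[:, :, permute]
        optimizer.zero_grad()
        output = model(data)
        loss = F.nll_loss(output, target)  # assumes a log-softmax output layer
        loss.backward()
        optimizer.step()
        train_loss += loss.item()
        steps += seq_length
        if batch_idx % 100 == 0:
            print('Epoch {} batch {}: avg loss {:.6f}'.format(
                ep, batch_idx, train_loss / (batch_idx + 1)))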
Example 2
iterations = 0
test_acc = []

print(args)
train_loader, test_loader = data_generator(root, batch_size)

permute = torch.Tensor(np.random.permutation(784).astype(np.float64)).long()
channel_sizes = [args.nhid] * args.levels
kernel_size = args.ksize

if args.lstm:
    model = LSTM(input_channels, 75, n_classes)
else:
    model = TCN(input_channels,
                n_classes,
                channel_sizes,
                kernel_size=kernel_size,
                dropout=args.dropout)

print(count_parameters(model))

if args.cuda:
    model.cuda()
    permute = permute.cuda()

lr = args.lr
optimizer = getattr(optim, args.optim)(model.parameters(), lr=lr)


def train(ep):
    global steps, iterations, test_acc
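
Example 2 prints count_parameters(model), but the helper itself is not part of the excerpt. A common definition with that behaviour, included here as an assumption rather than the original code:

def count_parameters(model):
    # total number of trainable parameters in the model
    return sum(p.numel() for p in model.parameters() if p.requires_grad)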
Example 3
root = './data/mnist'
batch_size = args.batch_size
n_classes = 10
input_channels = 1
seq_length = int(784 / input_channels)
epochs = args.epochs
steps = 0

print(args)
train_loader, test_loader = data_generator(root, batch_size)

permute = torch.Tensor(np.random.permutation(784).astype(np.float64)).long()
channel_sizes = [args.nhid] * args.levels
kernel_size = args.ksize
model = TCN(input_channels, n_classes, channel_sizes, kernel_size=kernel_size, dropout=args.dropout)

if args.cuda:
    model.cuda()
    permute = permute.cuda()

lr = args.lr
optimizer = getattr(optim, args.optim)(model.parameters(), lr=lr)


def train(ep):
    global steps
    train_loss = 0
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        if args.cuda: data, target = data.cuda(), target.cuda()
Example 4
batch_size = args.batch_size
n_classes = 10
input_channels = 1
seq_length = int(784 / input_channels)
epochs = args.epochs
steps = 0

print(args)
train_loader, test_loader = data_generator(root, batch_size)

permute = torch.Tensor(np.random.permutation(784).astype(np.float64)).long()
channel_sizes = [args.nhid] * args.levels
kernel_size = args.ksize
model = TCN(input_channels,
            n_classes,
            channel_sizes,
            kernel_size=kernel_size,
            dropout=args.dropout)

if args.cuda:
    model.cuda()
    permute = permute.cuda()

lr = args.lr
optimizer = getattr(optim, args.optim)(model.parameters(), lr=lr)


def train(ep):
    global steps
    train_loss = 0
    epoch_loss = []
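
The training excerpts above all instantiate the optimizer by looking up its class by name on torch.optim via getattr(optim, args.optim). A small standalone illustration of that pattern, with 'Adam' and the learning rate used purely as placeholder values:

import torch
import torch.optim as optim

params = [torch.nn.Parameter(torch.zeros(3))]   # stands in for model.parameters()
optim_name = 'Adam'                             # e.g. the value of args.optim
optimizer = getattr(optim, optim_name)(params, lr=2e-3)   # same as torch.optim.Adam(params, lr=2e-3)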
Example 5
batch_size = args.batch_size
n_classes = 10
input_channels = 1
seq_length = int(784 / input_channels)
epochs = args.epochs
steps = 0

print(args)
train_loader, test_loader = data_generator(root, batch_size)

permute = torch.Tensor(np.random.permutation(784).astype(np.float64)).long()
channel_sizes = [args.nhid] * args.levels
kernel_size = args.ksize
model = TCN(input_channels,
            n_classes,
            channel_sizes,
            kernel_size=kernel_size,
            dropout=args.dropout)

model = Chrysalis.metamorphosize(model, in_place=True)
if args.patch_conv:
    X = next(iter(train_loader))
    model.patch_conv(
        X[0].view(-1, input_channels, seq_length)[:1],
        verbose=True,
        kmatrix_depth=args.kmatrix_depth,
        max_kernel_size=args.max_kernel_size,
        padding_mode='zeros',
        base=args.base,
        perturb=args.perturb,
    )
Example 6
                    help='directory to save model')
parser.add_argument('--batch_size',
                    type=int,
                    default=64,
                    metavar='N',
                    help='batch size (default: 64)')
args = parser.parse_args()

if torch.cuda.is_available():
    if not args.cuda:
        print(
            "WARNING: You have a CUDA device, so you should probably run with --cuda"
        )

_, test_loader = data_generator(args.datapath, args.batch_size)
model = TCN()
model.load_state_dict(torch.load(args.modelpath))
model.eval()

if args.cuda:
    model.cuda()

model.fast_inference(args.batch_size)


def test():
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            if args.cuda:
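
The last excerpt breaks off inside the evaluation loop. A plausible continuation for a test() function of this shape, assuming a log-softmax output so that F.nll_loss applies (the original code is not shown beyond this point):

import torch.nn.functional as F

def test():
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            if args.cuda:
                data, target = data.cuda(), target.cuda()
            output = model(data)
            # accumulate the summed batch loss and the number of correct predictions
            test_loss += F.nll_loss(output, target, reduction='sum').item()
            pred = output.argmax(dim=1, keepdim=True)
            correct += pred.eq(target.view_as(pred)).sum().item()
    test_loss /= len(test_loader.dataset)
    accuracy = 100. * correct / len(test_loader.dataset)
    print('Test set: average loss {:.4f}, accuracy {}/{} ({:.0f}%)'.format(
        test_loss, correct, len(test_loader.dataset), accuracy))
    return test_loss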