Exemplo n.º 1
0
# Preprocessing: resize to the 224x224 input AlexNet expects, convert to a
# tensor, then map each channel from [0, 1] to [-1, 1] via (x - 0.5) / 0.5.
transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
# Validation images laid out one class per sub-directory under ./flower_data/val.
test_dataset = torchvision.datasets.ImageFolder(root="./flower_data/val",
                                                transform=transform)
# drop_last=True discards the final partial batch so every batch holds exactly
# test_batch_size samples (test_batch_size is defined elsewhere in the file);
# note this means some validation samples are never evaluated.
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          shuffle=False,
                                          batch_size=test_batch_size,
                                          num_workers=12,
                                          drop_last=True)
# print((len(test_dataset)))
# print(len(test_loader))
# len(test_dataset)==len(test_loader)*test_batch_size
# Restore trained weights and switch the model to inference mode.
model = net.AlexNet()
model.load_state_dict(torch.load("./weights.pth"))
model.to(device)
model.eval()
criterion = nn.CrossEntropyLoss()
# Running totals for the evaluation pass.
eval_loss = 0
eval_correct_rate = 0
eval_num_correct = 0

# Evaluation loop with a progress bar.
# NOTE(review): this loop appears truncated in this chunk — pbar is never
# updated and the two accuracy counters above are never incremented here.
# NOTE(review): consider wrapping the loop in torch.no_grad(); gradients are
# not needed during evaluation — confirm against the rest of the file.
with tqdm(total=len(test_loader)) as pbar:
    for img, label in test_loader:
        img = img.to(device)
        label = label.to(device)
        output = model(img)
        loss = criterion(output, label)
        eval_loss += loss.item()
Exemplo n.º 2
0
import util
import net

if __name__ == '__main__':
    # Parse command-line options shared by the MLP examples.
    arg_parser = util.default_parser('MLP Example')
    cli_args = arg_parser.parse_args()

    # Load the dataset selected on the command line (MNIST by default).
    train_data, test_data = util.get_dataset(cli_args.dataset)

    # Build an AlexNet with a fixed 10-way output layer.
    classifier = net.AlexNet(n_out=10)

    # Fit on the training split while monitoring the test split.
    util.train_model(classifier, train_data, test_data, cli_args)

    # Report held-out accuracy.
    test_accuracy = util.accuracy(classifier, test_data, gpu=cli_args.gpu)
    print('Model accuracy: ', test_accuracy)

    # Export the trained model as a C header file.
    classifier.generate_c('alex.h', train_data._datasets[0].shape[1:])
Exemplo n.º 3
0
import numpy as np

import util
import net

if __name__ == '__main__':
    # Parse command-line options shared by the MLP examples.
    arg_parser = util.default_parser('MLP Example')
    cli_args = arg_parser.parse_args()

    # Load the dataset selected on the command line (MNIST by default).
    train_data, test_data = util.get_dataset(cli_args.dataset)

    # Infer the layer sizes from the data itself: one input per feature
    # column, one output per distinct label value.
    num_features = train_data._datasets[0].shape[1]
    num_classes = len(np.unique(train_data._datasets[1]))

    # Build the model with the inferred dimensions.
    classifier = net.AlexNet(num_features, num_classes)

    # Fit on the training split while monitoring the test split.
    util.train_model(classifier, train_data, test_data, cli_args)

    # Report held-out accuracy.
    test_accuracy = util.accuracy(classifier, test_data, gpu=cli_args.gpu)
    print('Model accuracy: ', test_accuracy)

    # Export the trained model as a C header file.
    classifier.generate_c('alex.h', train_data._datasets[0].shape[1:])
Exemplo n.º 4
0
# Test images: cast to float32 and scale raw pixel values into [0, 1].
# NOTE(review): `caltech` and `data` are defined earlier in the file, outside
# this chunk — presumably a Caltech-101/256 dataset dict; confirm.
x_test = caltech['x_test'].astype('float32')
x_test /= 255

# Labels as int32 arrays, the integer dtype classifiers expect.
y_train = np.asarray(caltech['y_train'], dtype='int32')
y_test = np.asarray(caltech['y_test'], dtype='int32')

# Dataset sizes reported below and (presumably) used by the training loop.
N = data.num_train
N_test = data.num_test

print('- number of training data: %d' % N)
print('- number of test data: %d' % N_test)
print('- number of labels: %d' % len(caltech['label_names']))
print('done.')

# prepare network: AlexNet wrapped in a Classifier head
# (net.Classifier presumably attaches loss/accuracy computation — confirm).
model = net.Classifier(net.AlexNet())

# Initialize the optimizer chosen on the command line.
# 'adam' uses args.alpha as its step size; 'momentumsgd' uses
# args.learningrate. Any other value is rejected up front: the original
# pair of independent `if`s left `optimizer` unbound for unknown names,
# producing a confusing NameError at setup() instead of a clear message.
if args.optimizer == 'adam':
    optimizer = optimizers.Adam(args.alpha)
elif args.optimizer == 'momentumsgd':
    optimizer = optimizers.MomentumSGD(args.learningrate)
else:
    raise ValueError('unknown optimizer: %s' % args.optimizer)
# Bind the optimizer to the model's parameters.
optimizer.setup(model)

# training loop
print()
print('start learning')
# Move the model to GPU when a non-negative device id was passed via --gpu.
if args.gpu >= 0:
    model.to_gpu()
# NOTE(review): the loop body is truncated at the end of this chunk; only the
# per-epoch banner print is visible here (n_epoch is defined elsewhere).
for epoch in range(0, n_epoch):
    print('epoch', epoch + 1)