def train(epoch):
    """Train the autoencoder for one epoch and periodically save outputs.

    Args:
        epoch: 1-based epoch index, used for logging and save scheduling.

    Side effects: updates ``model`` in place via ``optimizer``; may write
    'origin_pictures.png', 'reconstruct_pictures.png' and 'model.pth'.
    """
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        optimizer.zero_grad()
        output = model(data)
        # Reconstruction objective: the target is the input itself.
        loss = F.mse_loss(output, data)
        loss.backward()
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))

    # BUG FIX: `epoch % n` (without `== 0`) is truthy on every epoch EXCEPT
    # multiples of n, inverting the save schedule. `== 0` matches the
    # convention used elsewhere in this file for save_model_epoch.
    if epoch % args.save_image_epoch == 0:
        # `data`/`output` hold the last batch of the epoch.
        utils.save_image(data.data,
                         'origin_pictures.png',
                         normalize=True,
                         scale_each=True)
        utils.save_image(output.data,
                         'reconstruct_pictures.png',
                         normalize=True,
                         scale_each=True)

    if epoch % args.save_model_epoch == 0:
        torch.save(model.state_dict(), 'model.pth')
def test():
    """Report the average reconstruction loss over the test set.

    Note: this definition is shadowed by the later `test()` in this file;
    kept for reference but modernized for current PyTorch.
    """
    model.eval()
    test_loss = 0
    # torch.no_grad() replaces the deprecated Variable(..., volatile=True).
    with torch.no_grad():
        for data, target in test_loader:
            if args.cuda:
                data, target = data.cuda(), target.cuda()
            output = model(data)
            # .item() replaces the removed Tensor indexing idiom .data[0].
            test_loss += F.mse_loss(output, data).item()  # sum up batch loss
    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}'.format(test_loss))
def test():
    """Evaluate the autoencoder: print the mean reconstruction loss."""
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            if args.cuda:
                data = data.cuda()
                target = target.cuda()
            data = Variable(data)
            target = Variable(target)
            reconstruction = model(data)
            # Accumulate the per-batch MSE between input and reconstruction.
            batch_loss = F.mse_loss(reconstruction, data).item()
            test_loss += batch_loss
    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}'.format(test_loss))
# --- Exemple #4 (scraped snippet separator; score: 0) ---
def test(epoch):
    """Compute the summed-then-averaged MSE over the test set.

    Args:
        epoch: current epoch; loss is printed only every 10 epochs.

    Returns:
        The average per-sample test loss (float).
    """
    model.eval()
    test_loss = 0
    # Evaluation needs no gradients; no_grad also replaces deprecated volatile=True.
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.cuda(), target.cuda()
            target = target.float()
            output = model(data)
            # reduction='sum' replaces the removed size_average=False;
            # .item() replaces the removed .data[0] indexing.
            test_loss += F.mse_loss(output, target, reduction='sum').item()

    test_loss /= len(test_loader.dataset)
    if epoch % 10 == 0:
        print('\nTest set: Average loss: {:.4f}'.format(test_loss))
        print('Test len = {}'.format(len(test_loader.dataset)))
    return test_loss
# --- Exemple #5 (scraped snippet separator; score: 0) ---
def train(epoch):
    """Train the classifier for one epoch with negative log-likelihood loss.

    Args:
        epoch: 1-based epoch index, used only for progress logging.
    """
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        optimizer.zero_grad()
        output = model(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            # .item() replaces the removed loss.data[0] idiom.
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
# --- Exemple #6 (scraped snippet separator; score: 0) ---
def train(epoch):
    """Train the classifier for one epoch over dict-style batches.

    Batches are dicts with 'image' and 'label' keys. Logs per-interval
    accuracy, prints epoch accuracy, and checkpoints the model every
    ``args.save_model_epoch`` epochs.

    Args:
        epoch: 1-based epoch index.

    Returns:
        Tuple ``(correct, accuracy_percent)`` for the whole epoch.
    """
    model.train()
    correct = 0
    correct_batch = 0
    for batch_idx, batch in enumerate(train_loader):
        data = batch['image']
        target = batch['label'].view(-1)
        if args.cuda:
            data = data.cuda()
            target = target.cuda()
        data, target = data.float(), target.long()
        optimizer.zero_grad()
        output = model(data)

        _, pred = torch.max(output.data, 1)
        # .item() keeps the counters as Python ints; a tensor counter makes
        # the percentage arithmetic below misbehave (integer division /
        # tensor-formatted output). Also compute the comparison once.
        batch_correct = pred.eq(target.data).sum().item()
        correct_batch += batch_correct
        correct += batch_correct
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            print(
                'Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\t Accuracy: {}/{}'
                .format(epoch, batch_idx * len(data),
                        len(train_loader.dataset),
                        100. * batch_idx / len(train_loader), loss.item(),
                        correct_batch,
                        len(pred) * args.log_interval))
            correct_batch = 0

    print('\nTrain set: Accuracy: {}/{} ({:.0f}%)'.format(
        correct, len(train_loader.dataset),
        100. * correct / len(train_loader.dataset)))
    if epoch % args.save_model_epoch == 0:
        # Single path expression instead of two near-duplicate branches.
        suffix = '' if args.aug == 0 else '_aug'
        torch.save(
            model.state_dict(), args.model + '_bs' + str(args.batch_size) +
            'e' + str(epoch) + suffix + '.pth')
    return correct, 100 * correct / len(train_loader.dataset)
# --- Exemple #7 (scraped snippet separator; score: 0) ---
def test():
    """Evaluate the classifier: print average NLL loss and accuracy."""
    model.eval()
    test_loss = 0
    correct = 0
    # no_grad replaces the deprecated Variable(..., volatile=True) idiom.
    with torch.no_grad():
        for data, target in test_loader:
            if args.cuda:
                data, target = data.cuda(), target.cuda()
            output = model(data)
            # reduction='sum' replaces removed size_average=False;
            # .item() replaces removed .data[0].
            test_loss += F.nll_loss(
                output, target, reduction='sum').item()  # sum up batch loss
            pred = output.data.max(
                1, keepdim=True)[1]  # get the index of the max log-probability
            # .item() keeps `correct` a Python int for clean formatting below.
            correct += pred.eq(target.data.view_as(pred)).sum().item()

    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
# --- Exemple #8 (scraped snippet separator; score: 0) ---
def train_sw(epoch):
    """Run one training epoch with a per-batch LR scheduler step.

    Args:
        epoch: 1-based epoch index; used for checkpointing and logging.

    Returns:
        Average per-sample training loss for the epoch (float).
    """
    train_loss = 0
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader_sw):
        # Scheduler is stepped once per batch (e.g. cyclic LR).
        scheduler_sw.batch_step()
        data, target = data.cuda(), target.cuda()
        target = target.float()
        optimizer.zero_grad()
        output = model(data)
        # reduction='sum' replaces removed size_average=False.
        loss = F.mse_loss(output, target, reduction='sum')
        loss.backward()
        optimizer.step()
        # .item() replaces removed .data[0].
        train_loss += loss.item()
    if epoch % args.save_model_epoch == 0:
        torch.save(
            model.state_dict(), 'results/' + 'weights_' + args.activation +
            '_l' + str(args.layers) + '_u' + str(args.units) + '.pth')
    if epoch % 1 == 0:
        print('Epoch {} \n \nTrain set: Average Loss: {:.4f}'.format(
            epoch, train_loss / len(train_loader_sw.dataset)))
    return train_loss / len(train_loader_sw.dataset)
# --- Exemple #9 (scraped snippet separator; score: 0) ---
def test():
    """Evaluate the classifier on dict-style batches; print loss/accuracy."""
    model.eval()
    test_loss = 0
    correct = 0
    # no_grad replaces the deprecated Variable(..., volatile=True) idiom.
    with torch.no_grad():
        for batch in test_loader:
            data = batch['image']
            target = batch['label'].view(-1)
            if args.cuda:
                data, target = data.cuda(), target.cuda()
            data, target = data.float(), target.long()
            output = model(data)
            # .item() replaces removed .data[0].
            test_loss += criterion(output, target).item()  # sum up batch loss
            _, pred = torch.max(output.data, 1)
            # .item() keeps `correct` a Python int for the report below.
            correct += pred.eq(target.data).sum().item()

    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
# --- Exemple #10 (scraped snippet separator; score: 0) ---
# Main control loop: classify the live camera frame and steer the drone.
# NOTE(review): `drone`, `pygame`, `image_loader`, `model`, `threshold`, and
# `f` (presumably torch.nn.functional imported as `f`) must be defined
# earlier in the file — confirm against the full script.
temp = 1  # loop flag; set to 0 by the space-bar handler to exit

while temp == 1:
    drone.speed = 0.08
    pygame.display.flip()
    events = pygame.event.get()
    for event in events:
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_SPACE:
                # Space bar: land, wait a second, then leave the loop.
                drone.land()
                time.sleep(1)
                # drone.reset
                temp = 0
    image = image_loader()
    output = model(image)
    # Convert logits to class probabilities along the class dimension.
    output = f.softmax(output, 1)
    # Best class probability (val) and its index (ind) for the single frame.
    val, ind = output.transpose(0, 1).max(0)

    # Hover whenever the classifier is not confident enough; otherwise map
    # class index 0/1/2 to forward/left/right motion.
    # NOTE(review): `.data[0]` is the old (pre-0.4) PyTorch scalar-access
    # idiom; on modern PyTorch this would be `.item()`.
    if (val.data[0] < threshold):
        drone.hover()
        print('hover')
    elif ind.data[0] == 0:
        print('Class: Forward with Prob: {} \n'.format(val.data[0]))
        drone.move_forward()
    elif ind.data[0] == 1:
        print('Class: Left with Prob: {} \n'.format(val.data[0]))
        drone.move_left()
    elif ind.data[0] == 2:
        print('Class: Right with Prob: {} \n'.format(val.data[0]))
        drone.move_right()