def init_inference(model_kwargs):
    """Initialise the module-level inference model.

    Builds ``CustomModel(**model_kwargs)`` and, depending on command-line
    flags, takes one of three paths:

    * ``args.trt_conversion`` — load the pretrained PyTorch weights,
      convert the model to a TensorRT engine via ``torch2trt``, save the
      engine state dict to ``args.trt_model``, and terminate the process.
    * ``args.trt_module`` — load an already-converted TensorRT engine
      from ``args.trt_model``.
    * default — load the plain PyTorch weights from
      ``args.pretrained_model``.

    Side effects: rebinds the module-level global ``model``; reads the
    module-level ``args`` and ``device`` (both defined elsewhere in the file).
    """
    global model
    model = CustomModel(**model_kwargs)
    model.eval()  # inference only: disable dropout / batch-norm updates

    if args.trt_module:
        from torch2trt import TRTModule
        if args.trt_conversion:
            # One-off conversion run: trace with a dummy input, serialize
            # the TensorRT engine, then stop the program.
            model.load_state_dict(torch.load(args.pretrained_model))
            model = model.cuda()  # TensorRT conversion requires CUDA
            # Dummy input, (N, C, H, W) — assumes 320x240 RGB frames; TODO confirm
            x = torch.ones((1, 3, 240, 320)).cuda()
            from torch2trt import torch2trt
            model_trt = torch2trt(model, [x], max_batch_size=100, fp16_mode=True)
            torch.save(model_trt.state_dict(), args.trt_model)
            raise SystemExit  # conversion-only run: do not continue to inference
        model_trt = TRTModule()
        model_trt.load_state_dict(torch.load(args.trt_model))
        model = model_trt.to(device)
    else:
        # map_location lets a GPU-saved checkpoint load on a CPU-only host.
        model.load_state_dict(
            torch.load(args.pretrained_model, map_location=device))
        model = model.to(device)
# Example #2 — scrape artifact: the header of the following training loop
# (its enclosing function / ``for`` statement) is missing from this excerpt.
        # NOTE(review): fragment of a training-loop body — the enclosing
        # ``for ... in ...`` header and epoch setup are outside this view.
        inputs, labels = data

        # Move the batch onto the training device (CPU/GPU).
        inputs, labels = inputs.to(device), labels.to(device)
        outputs = net(inputs)

        optimizer.zero_grad()  # reset gradients for a new calculation

        # ``loss`` is presumably a criterion callable (e.g. CrossEntropyLoss)
        # defined elsewhere — confirm against the surrounding file.
        loss_size = loss(outputs, labels)

        loss_size.backward()  # back-propagation of the loss

        # NOTE(review): ``.data`` bypasses autograd bookkeeping; the modern
        # equivalent for a scalar loss is ``.item()`` — left unchanged here.
        epoch_loss += loss_size.data

        optimizer.step(
        )  # optimizer step based on the back-propagation results

# --- Evaluation: measure classification accuracy of ``net`` on img_loader ---
net.eval()  # switch to eval mode (freezes dropout / batch-norm statistics)
correct = 0
total = 0

# Gradients are not needed for evaluation; no_grad avoids building the
# autograd graph, saving memory and time.
with torch.no_grad():
    for inputs, labels in img_loader:
        inputs, labels = inputs.to(device), labels.to(device)
        outputs = net(inputs)
        # argmax over the class dimension gives the predicted label.
        _, predicted = torch.max(outputs, 1)
        total += labels.size(0)  # total number of images seen
        correct += (predicted == labels).sum().item()  # count correct predictions

# Bug fix: the original spec "{:2f}" means *width* 2 with default precision
# (prints 6 decimals); "{:.2f}" prints two decimal places as intended.
print("Accuracy: {:.2f}".format(100 * correct / total))