'--model',
        type=str,
        default='model.pt',
        metavar='M',
        help="the model file to be evaluated. (default: model.pt)")
    parser.add_argument(
        '--outfile',
        type=str,
        default='visualize_stn.png',
        metavar='O',
        help="visualize the STN transformation on some input batch "
             "(default: visualize_stn.png)")

    args = parser.parse_args()

    # Load model checkpoint
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    checkpoint = torch.load(args.model, map_location=device)

    # Neural Network and Loss Function
    model = TrafficSignNet().to(device)
    model.load_state_dict(checkpoint)
    model.eval()
    criterion = nn.CrossEntropyLoss()

    # Data Initialization and Loading
    test_loader = get_test_loader(args.data, device)
    evaluate(model, criterion, test_loader)
    visualize_stn(test_loader, args.outfile)
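
# Note: evaluate() and visualize_stn() are helpers defined elsewhere in this project.
# The function below is only an illustrative sketch of an evaluate() with this call
# signature, assuming the test loader yields (images, labels) batches already placed
# on the chosen device; it is not the project's actual implementation.
import torch

def evaluate(model, criterion, test_loader):
    """Print the average loss and accuracy of `model` over `test_loader`."""
    model.eval()
    total_loss, correct, total = 0.0, 0, 0
    with torch.no_grad():
        for images, labels in test_loader:
            outputs = model(images)
            total_loss += criterion(outputs, labels).item() * labels.size(0)
            correct += (outputs.argmax(dim=1) == labels).sum().item()
            total += labels.size(0)
    print(f"Test loss: {total_loss / total:.4f}, accuracy: {correct / total:.2%}")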
Example #2
    parser.add_argument('--num-workers', type=int, default=0, metavar='W',
                        help='How many subprocesses to use for data loading (default: 0)')
    parser.add_argument('--epochs', type=int, default=100, metavar='N',
                        help='Number of epochs to train (default: 100)')
    parser.add_argument('--patience', type=int, default=10, metavar='P',
                        help='Number of epochs with no improvement after which training will be stopped (default: 10)')
    parser.add_argument('--lr', type=float, default=0.0001, metavar='LR',
                        help='Learning rate (default: 0.0001)')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='Random seed (default: 1)')
    parser.add_argument('--checkpoint', type=str, default='model.pt', metavar='M',
                        help='checkpoint file name (default: model.pt)')
    args = parser.parse_args()

    torch.manual_seed(args.seed)

    # Data Initialization and Loading
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    preprocess(args.data)
    train_loader, valid_loader = get_train_loaders(
        args.data, device, args.batch_size, args.num_workers, args.class_count)

    # Neural Network and Optimizer
    model = TrafficSignNet().to(device)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=args.lr)

    # Training and Validation
    fit(args.epochs, model, criterion, optimizer,
        train_loader, valid_loader, args.patience, args.checkpoint)
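
# Note: fit() is defined elsewhere in this project. The sketch below only illustrates
# a training loop with patience-based early stopping and best-model checkpointing that
# matches the call above, assuming the loaders yield (images, labels) batches already
# on the target device; it is not the project's actual implementation.
import torch

def fit(epochs, model, criterion, optimizer, train_loader, valid_loader, patience, checkpoint):
    best_valid_loss = float('inf')
    epochs_without_improvement = 0
    for epoch in range(epochs):
        # One training pass over the training set
        model.train()
        for images, labels in train_loader:
            optimizer.zero_grad()
            loss = criterion(model(images), labels)
            loss.backward()
            optimizer.step()

        # Validation pass to track generalization
        model.eval()
        valid_loss = 0.0
        with torch.no_grad():
            for images, labels in valid_loader:
                valid_loss += criterion(model(images), labels).item()
        valid_loss /= len(valid_loader)

        # Save the best model so far; stop after `patience` epochs without improvement
        if valid_loss < best_valid_loss:
            best_valid_loss = valid_loss
            epochs_without_improvement = 0
            torch.save(model.state_dict(), checkpoint)
        else:
            epochs_without_improvement += 1
            if epochs_without_improvement >= patience:
                break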
    )
    parser.add_argument(
        '--model',
        type=str,
        default='model.pt',
        metavar='M',
        help="the model file to be evaluated. (default: model.pt)")

    args = parser.parse_args()

    # Load model checkpoint
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    checkpoint = torch.load(args.model, map_location=device)

    # Neural Network and Loss Function
    model = TrafficSignNet().to(device)
    model.load_state_dict(checkpoint)

    model_children = list(model.children())

    # Load the data to be visualized
    test_loader = get_test_loader(args.data, device)

    # As instructed in the programming assignment, select 10 images from at least 5 different classes
    images = []
    images_classes = []
    dict = {}  # per-class image counter (note: shadows the built-in dict)
    for x, y in test_loader:
        if len(images) < 10:
            # take at most two images per class until ten images have been collected
            dict[y] = dict[y] + 1 if y in dict else 1
            if dict[y] <= 2: