Example #1
import argparse

import torch
import torch.nn as nn
import torch.optim as optim

# Project-specific helpers; the module paths below are assumptions made to
# keep the snippet self-contained, not the project's confirmed layout.
from data import preprocess, get_train_loaders
from model import TrafficSignNet
from training import fit


def main():
    parser = argparse.ArgumentParser(description='Traffic sign classifier training')
    # The next three arguments are inferred from the later references to
    # args.data, args.batch_size, and args.class_count; their defaults are
    # assumptions.
    parser.add_argument('--data', type=str, default='data', metavar='D',
                        help='Folder where the data is located (default: data)')
    parser.add_argument('--batch-size', type=int, default=64, metavar='B',
                        help='Input batch size for training (default: 64)')
    parser.add_argument('--class-count', type=int, default=43, metavar='C',
                        help='Number of target classes (default: 43)')
    parser.add_argument('--num-workers', type=int, default=0, metavar='W',
                        help='How many subprocesses to use for data loading (default: 0)')
    parser.add_argument('--epochs', type=int, default=100, metavar='N',
                        help='Number of epochs to train (default: 100)')
    parser.add_argument('--patience', type=int, default=10, metavar='P',
                        help='Number of epochs with no improvement after which training will be stopped (default: 10)')
    parser.add_argument('--lr', type=float, default=0.0001, metavar='LR',
                        help='Learning rate (default: 0.0001)')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='Random seed (default: 1)')
    parser.add_argument('--checkpoint', type=str, default='model.pt', metavar='M',
                        help='Checkpoint file name (default: model.pt)')
    args = parser.parse_args()

    torch.manual_seed(args.seed)
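    # Optional addition, not in the original snippet: also seed the CUDA RNGs
    # so GPU runs are reproducible.
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(args.seed)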

    # Data Initialization and Loading
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    preprocess(args.data)
    train_loader, valid_loader = get_train_loaders(
        args.data, device, args.batch_size, args.num_workers, args.class_count)

    # Neural Network and Optimizer
    model = TrafficSignNet().to(device)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=args.lr)

    # Training and Validation
    fit(args.epochs, model, criterion, optimizer,
        train_loader, valid_loader, args.patience, args.checkpoint)
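The fit helper is project-specific and not shown above. The sketch below is a minimal early-stopping training loop consistent with the call signature in the example; it reuses the imports above and assumes batches already live on the right device and that validation loss is the early-stopping metric. It is an illustration, not the project's actual implementation.

def fit(epochs, model, criterion, optimizer, train_loader, valid_loader,
        patience, checkpoint):
    best_loss = float('inf')
    stale_epochs = 0
    for epoch in range(epochs):
        # One optimization pass over the training set
        model.train()
        for inputs, targets in train_loader:
            optimizer.zero_grad()
            loss = criterion(model(inputs), targets)
            loss.backward()
            optimizer.step()

        # Average validation loss for this epoch
        model.eval()
        valid_loss = 0.0
        with torch.no_grad():
            for inputs, targets in valid_loader:
                valid_loss += criterion(model(inputs), targets).item()
        valid_loss /= len(valid_loader)

        # Keep the best weights; stop after `patience` epochs with no improvement
        if valid_loss < best_loss:
            best_loss = valid_loss
            stale_epochs = 0
            torch.save(model.state_dict(), checkpoint)
        else:
            stale_epochs += 1
            if stale_epochs >= patience:
                break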
Example #2
import logging
import os
import pickle

# Project-specific modules; the import paths below are assumptions made to
# keep the snippet self-contained.
import data
from model import VGNSL
from train_utils import adjust_learning_rate, train, validate


def main(opt):
    # `opt` holds the parsed command-line options; its construction is
    # truncated in the original snippet.
    # Route log records to the console only.
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter('%(asctime)s %(message)s')
    console = logging.StreamHandler()
    console.setFormatter(formatter)
    logger.addHandler(console)
    logger.propagate = False

    # load predefined vocabulary and pretrained word embeddings if applicable
    with open(os.path.join(opt.data_path, 'vocab.pkl'), 'rb') as f:
        vocab = pickle.load(f)
    opt.vocab_size = len(vocab)

    if opt.init_embeddings:
        opt.vocab_init_embeddings = os.path.join(
            opt.data_path, f'vocab.pkl.{opt.init_embeddings_key}_embeddings.npy'
        )

    # Build the training and validation data loaders
    train_loader, val_loader = data.get_train_loaders(
        opt.data_path, vocab, opt.batch_size, opt.workers
    )

    # construct the model
    model = VGNSL(opt)

    best_rsum = 0
    for epoch in range(opt.num_epochs):
        adjust_learning_rate(opt, model.optimizer, epoch)

        # train for one epoch
        train(opt, train_loader, model, epoch, val_loader, vocab)

        # evaluate on validation set using VSE metrics
        rsum = validate(opt, val_loader, model, vocab)

        # track the best validation score seen so far; the original snippet is
        # truncated here, and checkpoint saving would typically follow
        best_rsum = max(rsum, best_rsum)
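adjust_learning_rate is likewise not shown. A common implementation in training loops of this kind is a step decay applied directly to the optimizer's parameter groups; the sketch below assumes opt.learning_rate and opt.lr_update fields, and both names are assumptions rather than confirmed project options.

def adjust_learning_rate(opt, optimizer, epoch):
    # Decay the learning rate 10x every `opt.lr_update` epochs
    # (field names are assumptions, not confirmed project options)
    lr = opt.learning_rate * (0.1 ** (epoch // opt.lr_update))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr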