Example #1
import torch
import torch.nn as nn
import torch.optim as optim


def main():
    # Set the hyper-parameters
    learning_rate = 0.001
    batch_size = 100
    epochs = 11

    train_loader, test_loader = MNIST_dataset(batch_size)

    # Build the model
    lenet = Lenet()
    # Set the loss function & optimizer
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(lenet.parameters(), lr=learning_rate)

    # Pick the first GPU if available, otherwise fall back to the CPU;
    # train_and_test is expected to move the model and batches onto it
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # Train & Test the model
    training_loss = train_and_test(device,
                                   epochs,
                                   train_loader,
                                   test_loader,
                                   optimizer,
                                   criterion,
                                   model=lenet)
    plot_loss(training_loss)
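
The snippet above assumes a Lenet module and an MNIST_dataset helper defined elsewhere in the project (train_and_test and plot_loss are presumably project helpers as well: a per-epoch loop returning the loss history, and a plot of it). A minimal sketch of what the first two might look like, assuming a LeNet-5-style network and standard torchvision MNIST loaders; the layer sizes and normalization constants below are illustrative assumptions, not taken from the original:

import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torchvision import datasets, transforms


class Lenet(nn.Module):
    # LeNet-5-style CNN for 1x28x28 MNIST digits (exact sizes are assumptions)
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 6, 3)           # 28x28 -> 26x26
        self.conv2 = nn.Conv2d(6, 16, 3)          # 13x13 -> 11x11
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)              # 10 digit classes

    def forward(self, x):
        x = F.max_pool2d(F.relu(self.conv1(x)), 2)   # 26x26 -> 13x13
        x = F.max_pool2d(F.relu(self.conv2(x)), 2)   # 11x11 -> 5x5
        x = x.flatten(1)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        return self.fc3(x)                           # raw logits for CrossEntropyLoss


def MNIST_dataset(batch_size):
    # Standard MNIST loaders; 0.1307/0.3081 are the usual MNIST mean/std
    transform = transforms.Compose([transforms.ToTensor(),
                                    transforms.Normalize((0.1307,), (0.3081,))])
    train_set = datasets.MNIST("./data", train=True, download=True, transform=transform)
    test_set = datasets.MNIST("./data", train=False, download=True, transform=transform)
    return (DataLoader(train_set, batch_size=batch_size, shuffle=True),
            DataLoader(test_set, batch_size=batch_size, shuffle=False))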
Example #2
import random

import numpy as np
import torch


def active_learner(device, seed):
    # Make runs reproducible: fix every RNG the training pipeline touches
    # (torch.cuda.manual_seed is silently ignored when no GPU is present)
    torch.backends.cudnn.deterministic = True
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
    model = Lenet().to(device)
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=0.001,
                                 amsgrad=True)
    return model, optimizer, None
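
For context, a minimal usage sketch of the helper above (the seed value is arbitrary; Lenet is the project's model class as in Example #1). For fully deterministic cuDNN runs you would typically also set torch.backends.cudnn.benchmark = False:

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model, optimizer, _ = active_learner(device, seed=42)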
Example #3
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

model_ft = Lenet()
model_ft.conv1 = nn.Conv2d(3, 6, 3)  # first conv now takes 3 input channels: these are color images
num_ftrs = model_ft.fc3.in_features
# Swap the last fully connected layer for one whose output size matches the
# number of classes, i.e. nn.Linear(num_ftrs, len(class_names)),
# where class_names is the dataset's list of class labels
model_ft.fc3 = nn.Linear(num_ftrs, len(class_names))

if torch.cuda.device_count() > 1:
    print("Let's use", torch.cuda.device_count(), "GPUs!")
    # dim = 0 [30, xxx] -> [10, ...], [10, ...], [10, ...] on 3 GPUs
    model_ft = nn.DataParallel(model_ft)

model_ft = model_ft.to(device)

criterion = nn.CrossEntropyLoss()

# Observe that all parameters are being optimized
# optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)
optimizer_ft = optim.Adam(model_ft.parameters())

# Decay LR by a factor of 0.1 every 7 epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft,
                                       step_size=7,
                                       gamma=0.1)

model_ft = train_model(model_ft,
                       criterion,
                       optimizer_ft,
                       exp_lr_scheduler,
                       num_epochs=100)
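
Example #3 relies on a train_model helper that is not shown. Below is a minimal sketch of the loop it implies, assuming a global dataloaders dict holding "train"/"val" DataLoaders; that name, like the per-epoch printout, is an assumption modeled on the common fine-tuning pattern, not the author's actual helper:

def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
    for epoch in range(num_epochs):
        for phase in ("train", "val"):
            if phase == "train":
                model.train()   # training mode (affects dropout/batch-norm if present)
            else:
                model.eval()
            running_loss = 0.0
            for inputs, labels in dataloaders[phase]:
                inputs, labels = inputs.to(device), labels.to(device)
                optimizer.zero_grad()
                # Track gradients only during the training phase
                with torch.set_grad_enabled(phase == "train"):
                    loss = criterion(model(inputs), labels)
                    if phase == "train":
                        loss.backward()
                        optimizer.step()
                running_loss += loss.item() * inputs.size(0)
            if phase == "train":
                scheduler.step()  # apply the StepLR decay once per epoch
            print(f"epoch {epoch}: {phase} loss "
                  f"{running_loss / len(dataloaders[phase].dataset):.4f}")
    return model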