Code Example #1
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, random_split

# FireDataset, MLP, do_train, and do_test are assumed to be defined
# elsewhere in this project.


def main(args):
    input_size = 3
    output_size = 1

    # Randomly split the data so the training and test sets are less biased
    fire_dataset = FireDataset(args.csv_path[0])

    # Derive the test size from the train size so the two always sum to
    # len(fire_dataset); rounding both fractions independently can be
    # off by one, which makes random_split raise
    train_len = round(0.7 * len(fire_dataset))
    fire_train, fire_test = random_split(
        fire_dataset, (train_len, len(fire_dataset) - train_len))

    trainloader = DataLoader(fire_train,
                             batch_size=4096,
                             shuffle=True,
                             num_workers=2)
    testloader = DataLoader(fire_test,
                            batch_size=512,
                            shuffle=False,
                            num_workers=2)

    save_weights_pth = args.weights_path[0]

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = MLP(input_size=input_size, output_size=output_size)
    model.to(device)

    # BCELoss expects probabilities, so the model's final layer should
    # apply a sigmoid
    criterion = nn.BCELoss()
    optimizer = optim.Adam(model.parameters(), lr=5e-7)
    epochs = 30

    if args.eval_only:
        # Skip training and evaluate the saved checkpoint directly
        do_test(model, device, testloader, save_weights_pth)
    else:
        do_train(model, device, trainloader, criterion, optimizer, epochs,
                 save_weights_pth)
        do_test(model, device, testloader, save_weights_pth)
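
Both examples instantiate an MLP class that the listings do not show. A minimal sketch of what Example #1's model could look like, assuming a single hidden layer (the hidden width and layer structure are guesses; only the class name and the input/output sizes come from the listing). The final sigmoid matches the nn.BCELoss used above, which expects probabilities rather than raw logits:

import torch.nn as nn

class MLP(nn.Module):
    """A small fully connected network; the hidden size is a guess."""

    def __init__(self, input_size, output_size, hidden_size=64):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(input_size, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, output_size),
            nn.Sigmoid(),  # BCELoss needs outputs in [0, 1]
        )

    def forward(self, x):
        return self.net(x)

Example #2's model would differ in one respect: nn.CrossEntropyLoss consumes raw logits, so its final layer would be a plain nn.Linear with three outputs and no sigmoid.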
Code Example #2
File: run_pytorch.py Project: PoeWunLee/hlb-datathon
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, random_split

# HLBDataset, MLP, do_train, do_test, save_data, and plot_graph are
# assumed to be defined elsewhere in this project.


def main(args):
    input_size = 5
    output_size = 3
    epochs = 100

    # Randomly split the data so the training and test sets are less biased
    hlb_dataset = HLBDataset(args.dataset_path[0])

    # Derive the test size from the train size so the two always sum to
    # len(hlb_dataset); rounding both fractions independently can be
    # off by one, which makes random_split raise
    train_len = round(0.8 * len(hlb_dataset))
    hlb_train, hlb_test = random_split(
        hlb_dataset, (train_len, len(hlb_dataset) - train_len))

    print(args.weights_path[0])
    print(f'Number of training examples: {len(hlb_train)}')
    print(f'Number of testing examples: {len(hlb_test)}')

    trainloader = DataLoader(hlb_train,
                             batch_size=2048,
                             shuffle=True,
                             num_workers=2)
    testloader = DataLoader(hlb_test,
                            batch_size=1024,
                            shuffle=False,
                            num_workers=2)

    save_weights_pth = args.weights_path[0]

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = MLP(input_size=input_size, output_size=output_size)
    print(model)

    model.to(device)

    # Per-class weights to counter class imbalance in the dataset
    weights = torch.tensor([0.85, 0.25, 1.0])

    # Move the class weights to the same device as the model; calling
    # .cuda() unconditionally would crash on CPU-only machines
    criterion = nn.CrossEntropyLoss(weight=weights.to(device))
    learning_rate = 1e-6
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)

    train_acc_array = []
    train_loss_array = []
    test_acc_array = []
    test_loss_array = []
    for i in range(epochs):  # loop over the dataset multiple times
        train_loss, train_acc = do_train(model, device, trainloader, criterion,
                                         optimizer)

        print(f'Epoch {i} Train loss: {train_loss} Train acc: {train_acc}')
        train_acc_array.append(train_acc)
        train_loss_array.append(train_loss)

        test_loss, test_acc = do_test(model, device, testloader, criterion)
        test_acc_array.append(test_acc)
        test_loss_array.append(test_loss)

        print(f'Test loss: {test_loss} Test acc: {test_acc}')

        # Decay the learning rate by 1% every 50 epochs; skipping epoch 0
        # avoids an immediate decay before any training has happened
        if i > 0 and i % 50 == 0:
            learning_rate = learning_rate * 0.99
            for param_group in optimizer.param_groups:
                param_group['lr'] = learning_rate

    # Persist the trained weights, then save and plot the per-epoch metrics
    torch.save(model.state_dict(), args.weights_path[0])
    save_data(train_loss_array, train_acc_array, test_loss_array,
              test_acc_array)
    plot_graph(train_loss_array, train_acc_array, test_loss_array,
               test_acc_array)
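
Example #2's do_train and do_test helpers are also project-local. A plausible sketch of the contract the loop above relies on, one full pass over a DataLoader returning (average loss, accuracy); the signatures and return shapes are implied by the listing, but the bodies and the (inputs, labels) batch format are assumptions:

import torch

def do_train(model, device, loader, criterion, optimizer):
    """One training epoch; returns (average loss, accuracy)."""
    model.train()
    total_loss, correct, seen = 0.0, 0, 0
    for inputs, labels in loader:  # assumed batch format
        inputs, labels = inputs.to(device), labels.to(device)
        optimizer.zero_grad()
        outputs = model(inputs)  # raw logits for CrossEntropyLoss
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        total_loss += loss.item() * labels.size(0)
        correct += (outputs.argmax(dim=1) == labels).sum().item()
        seen += labels.size(0)
    return total_loss / seen, correct / seen


def do_test(model, device, loader, criterion):
    """One evaluation pass; returns (average loss, accuracy)."""
    model.eval()
    total_loss, correct, seen = 0.0, 0, 0
    with torch.no_grad():  # no gradients needed during evaluation
        for inputs, labels in loader:
            inputs, labels = inputs.to(device), labels.to(device)
            outputs = model(inputs)
            total_loss += criterion(outputs, labels).item() * labels.size(0)
            correct += (outputs.argmax(dim=1) == labels).sum().item()
            seen += labels.size(0)
    return total_loss / seen, correct / seen

Note that Example #1's do_test takes a weights path instead of a criterion, so the two projects' helpers are not interchangeable.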