Example 1
def train(model, dataset, n_epoch, batch_size, learning_rate, use_gpu=False):
    history = History()

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=learning_rate)

    dataset.transform = ToTensor()
    train_loader, val_loader = train_valid_loaders(dataset,
                                                   batch_size=batch_size)

    for i in range(n_epoch):
        model.train()
        for j, batch in enumerate(train_loader):

            inputs, targets = batch
            if use_gpu:
                inputs = inputs.cuda()
                targets = targets.cuda()

            optimizer.zero_grad()
            output = model(inputs)

            loss = criterion(output, targets)
            loss.backward()
            optimizer.step()

        train_acc, train_loss = validate(model, train_loader, use_gpu)
        val_acc, val_loss = validate(model, val_loader, use_gpu)
        history.save(train_acc, val_acc, train_loss, val_loss)
        print(
            'Epoch {} - Train acc: {:.2f} - Val acc: {:.2f} - Train loss: {:.4f} - Val loss: {:.4f}'
            .format(i, train_acc, val_acc, train_loss, val_loss))
    return history
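
The helpers used above (History, train_valid_loaders, validate, ToTensor) come from the surrounding project and are not shown. A minimal usage sketch, assuming those helpers are importable and using torchvision's MNIST purely as a stand-in dataset:

import torch.nn as nn
from torchvision.datasets import MNIST

# Any nn.Module producing class logits would do; this toy classifier is only a placeholder.
model = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 10))
dataset = MNIST(root='./data', train=True, download=True)

history = train(model, dataset, n_epoch=5, batch_size=64,
                learning_rate=0.01, use_gpu=False)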
Example 2
def test(model,
         dataset,
         batch_size,
         regularizer_loss,
         all_param_regularized=True,
         use_gpu=False):
    dataset.transform = ToTensor()
    loader, _ = train_valid_loaders(dataset, batch_size)
    return validate(model, loader, regularizer_loss, all_param_regularized,
                    use_gpu)
Example 3
def train_mnist(model,
                dataset,
                n_epoch,
                batch_size,
                learning_rate,
                regularizer_loss,
                all_param_regularized=True,
                use_gpu=False):
    history = History()

    criterion = nn.CrossEntropyLoss()
    # optimizer = optim.SGD(model.parameters(), lr=learning_rate)
    optimizer = optim.Adam(model.parameters(),
                           lr=learning_rate,
                           weight_decay=0.0)

    dataset.transform = ToTensor()
    train_loader, val_loader = train_valid_loaders(dataset,
                                                   batch_size=batch_size)

    for i in range(n_epoch):
        model.train()
        for j, batch in enumerate(train_loader):
            inputs, targets = batch
            if use_gpu:
                inputs = inputs.cuda()
                targets = targets.cuda()

            optimizer.zero_grad()
            # Debug output: inspect the batch's Python type, tensor dtype and shape.
            print(type(inputs))
            print(inputs.type())
            print(inputs.shape)
            output = model(inputs)
            loss = criterion(output, targets)
            if all_param_regularized:
                loss = regularizer_loss.loss_all_params_regularized(
                    reg_loss_function=loss)
            else:
                loss = regularizer_loss.loss_regularized(
                    reg_loss_function=loss)
            loss.backward()
            optimizer.step()

        train_acc, train_loss = validate(model, train_loader, regularizer_loss,
                                         all_param_regularized, use_gpu)
        val_acc, val_loss = validate(model, val_loader, regularizer_loss,
                                     all_param_regularized, use_gpu)
        history.save(train_acc, val_acc, train_loss, val_loss)
        print(
            'Epoch {} - Train acc: {:.2f} - Val acc: {:.2f} - Train loss: {:.4f} - Val loss: {:.4f}'
            .format(i, train_acc, val_acc, train_loss, val_loss))
    return history
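
Example 3 delegates the penalty term to a regularizer_loss object whose loss_all_params_regularized and loss_regularized methods take the base loss and return an augmented one. The class itself is not part of the example; the sketch below is only a hypothetical illustration of that interface, using an L2 penalty:

class L2RegularizerLoss:
    # Hypothetical sketch of a regularizer compatible with Example 3's calls;
    # the real class in the source project may differ.

    def __init__(self, model, lambda_reg=1e-4, param_name=None):
        self.model = model
        self.lambda_reg = lambda_reg
        self.param_name = param_name  # only used by loss_regularized

    def loss_all_params_regularized(self, reg_loss_function):
        # Add an L2 penalty over every model parameter to the base loss.
        penalty = sum(p.pow(2).sum() for p in self.model.parameters())
        return reg_loss_function + self.lambda_reg * penalty

    def loss_regularized(self, reg_loss_function):
        # Add an L2 penalty over a single named parameter only.
        for name, p in self.model.named_parameters():
            if name == self.param_name:
                reg_loss_function = reg_loss_function + self.lambda_reg * p.pow(2).sum()
        return reg_loss_function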
Example 4
def train_without_regularizer(model,
                              dataset,
                              n_epoch,
                              batch_size,
                              learning_rate,
                              weight_decay,
                              use_weight_decay=False,
                              use_gpu=False):
    history = History()

    criterion = nn.CrossEntropyLoss()
    if use_weight_decay:
        optimizer = optim.Adam(model.parameters(),
                               lr=learning_rate,
                               weight_decay=weight_decay)
    else:
        optimizer = optim.Adam(model.parameters(),
                               lr=learning_rate,
                               weight_decay=0.0)
    dataset.transform = ToTensor()
    train_loader, val_loader = train_valid_loaders(dataset,
                                                   batch_size=batch_size)

    for i in range(n_epoch):
        model.train()
        for j, batch in enumerate(train_loader):
            inputs, targets = batch['data'], batch['target']
            inputs = inputs.type(torch.FloatTensor)
            if use_gpu:
                inputs = inputs.cuda()
                targets = targets.cuda()

            optimizer.zero_grad()
            output = model(inputs)
            loss = criterion(output, targets)
            loss.backward()
            optimizer.step()

        # No regularizer is used in this variant, so evaluate with the plain loss.
        train_acc, train_loss = validate_without_reg(model, train_loader,
                                                     use_gpu)
        val_acc, val_loss = validate_without_reg(model, val_loader, use_gpu)
        history.save(train_acc, val_acc, train_loss, val_loss)
        print(
            'Epoch {} - Train acc: {:.2f} - Val acc: {:.2f} - Train loss: {:.4f} - Val loss: {:.4f}'
            .format(i, train_acc, val_acc, train_loss, val_loss))
    return history
def train_with_regularizer(model,
                           dataset,
                           n_epoch,
                           batch_size,
                           learning_rate,
                           regularizer_loss,
                           param_name='',
                           all_param_regularized=True,
                           use_gpu=False):
    since = time.time()
    # Deep copy; state_dict() returns live references that training updates in place.
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    best_number_epoch = 0
    history = History()

    criterion = nn.CrossEntropyLoss()
    # optimizer = optim.SGD(model.parameters(), lr=learning_rate)
    optimizer = optim.Adam(model.parameters(),
                           lr=learning_rate,
                           weight_decay=0.0)

    dataset.transform = ToTensor()
    train_loader, val_loader = train_valid_loaders(dataset,
                                                   batch_size=batch_size)

    for i in range(n_epoch):
        model.train()
        for j, batch in enumerate(train_loader):
            inputs, targets = batch['data'], batch['target']
            inputs = inputs.type(torch.FloatTensor)
            if use_gpu:
                inputs = inputs.cuda()
                targets = targets.cuda()
                model.cuda()

            optimizer.zero_grad()
            output = model(inputs)
            loss = criterion(output, targets)

            if all_param_regularized:
                loss = regularizer_loss.regularized_all_param(
                    reg_loss_function=loss)
            else:
                assert param_name != '', 'you must specify the name of the parameter to be regularized'
                for model_param_name, model_param_value in model.named_parameters():
                    if model_param_name == param_name:
                        loss = regularizer_loss.regularized_param(
                            param_weights=model_param_value,
                            reg_loss_function=loss)
            loss.backward()
            optimizer.step()

        train_acc, train_loss = validate_with_reg(model, train_loader,
                                                  regularizer_loss, param_name,
                                                  all_param_regularized,
                                                  use_gpu)
        val_acc, val_loss = validate_with_reg(model, val_loader,
                                              regularizer_loss, param_name,
                                              all_param_regularized, use_gpu)
        history.save(train_acc, val_acc, train_loss, val_loss)
        print(
            'Epoch {} - Train acc: {:.2f} - Val acc: {:.2f} - Train loss: {:.4f} - Val loss: {:.4f}'
            .format(i, train_acc, val_acc, train_loss, val_loss))
        if val_acc > best_acc:
            best_number_epoch = i
            best_acc = val_acc
            best_model_wts = copy.deepcopy(model.state_dict())

    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:.4f} at epoch {}'.format(best_acc,
                                                    best_number_epoch))
    model.load_state_dict(best_model_wts)
    return history, model
def train_without_regularizer(model,
                              dataset,
                              n_epoch,
                              batch_size,
                              learning_rate,
                              weight_decay=None,
                              use_weight_decay=False,
                              use_gpu=False):
    since = time.time()
    # Deep copy; state_dict() returns live references that training updates in place.
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    best_number_epoch = 0
    history = History()

    criterion = nn.CrossEntropyLoss()
    if use_weight_decay:
        optimizer = optim.Adam(model.parameters(),
                               lr=learning_rate,
                               weight_decay=weight_decay)
    else:
        optimizer = optim.Adam(model.parameters(),
                               lr=learning_rate,
                               weight_decay=0)
    dataset.transform = ToTensor()
    train_loader, val_loader = train_valid_loaders(dataset,
                                                   batch_size=batch_size)

    for i in range(n_epoch):
        model.train()
        for j, batch in enumerate(train_loader):
            inputs, targets = batch['data'], batch['target']
            inputs = inputs.type(torch.FloatTensor)
            if use_gpu:
                inputs = inputs.cuda()
                targets = targets.cuda()
                model.cuda()

            optimizer.zero_grad()
            output = model(inputs)
            loss = criterion(output, targets)
            loss.backward()
            optimizer.step()

        train_acc, train_loss = validate_without_reg(model, train_loader,
                                                     use_gpu)
        val_acc, val_loss = validate_without_reg(model, val_loader, use_gpu)
        history.save(train_acc, val_acc, train_loss, val_loss)
        print(
            'Epoch {} - Train acc: {:.2f} - Val acc: {:.2f} - Train loss: {:.4f} - Val loss: {:.4f}'
            .format(i, train_acc, val_acc, train_loss, val_loss))
        if val_acc > best_acc:
            best_number_epoch = i
            best_acc = val_acc
            best_model_wts = copy.deepcopy(model.state_dict())

    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:.4f} at epoch {}'.format(best_acc,
                                                    best_number_epoch))
    model.load_state_dict(best_model_wts)
    return history, model
def model_test(model, dataset, batch_size, use_gpu=False):
    dataset.transform = ToTensor()
    loader, _ = train_valid_loaders(dataset, batch_size)
    return validate_without_reg(model, loader, use_gpu)
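
The evaluation helper validate_without_reg is not shown either; only its (model, loader, use_gpu) -> (accuracy, loss) interface can be read off the calls above. A hypothetical reconstruction under that assumption, matching the dict-style batches ({'data': ..., 'target': ...}) the training loops unpack:

import torch
import torch.nn as nn

def validate_without_reg(model, loader, use_gpu=False):
    # Hypothetical reconstruction: averages cross-entropy and accuracy over the
    # loader; accuracy is returned as a percentage here, and the project's real
    # implementation may differ.
    criterion = nn.CrossEntropyLoss()
    model.eval()
    correct, total, total_loss = 0, 0, 0.0
    with torch.no_grad():
        for batch in loader:
            inputs, targets = batch['data'], batch['target']
            inputs = inputs.type(torch.FloatTensor)
            if use_gpu:
                inputs = inputs.cuda()
                targets = targets.cuda()
            output = model(inputs)
            total_loss += criterion(output, targets).item() * targets.size(0)
            correct += (output.argmax(dim=1) == targets).sum().item()
            total += targets.size(0)
    return 100.0 * correct / total, total_loss / total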