def evaluate_model(model, loader, device):
    """
    Calculate and return the accuracy (average relative error) of the model on the validation or test set.

    model: the model to evaluate.
    loader: the dataloader of the test or validation set.
    device: either CPU or CUDA.
    """
    model.eval()
    accuracies = []
    losses = []
    with torch.no_grad():
        for idx, thebatch in enumerate(loader):
            batch = torch.Tensor(thebatch["image"]).to(device)
            truth = torch.Tensor(thebatch["depth"]).to(device)
            pred = model(batch)
            accuracies.append(
                torch.mean(torch.abs(pred - truth) / truth).item())
            loss = DepthLoss(0.1)
            losses.append(loss(pred, truth).item())
        acc = sum(accuracies) / len(accuracies)
        loss = sum(losses) / len(losses)
        print("Evaluation accuracy: {}".format(acc))
    return acc, loss
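DepthLoss is never defined in these snippets; every example constructs it as DepthLoss(0.1) and calls it like an nn.Module on (prediction, target). A minimal sketch of one plausible implementation, assuming the 0.1 argument weights an image-gradient term against an L1 depth term; the project's actual loss may combine different terms:

import torch
import torch.nn as nn

class DepthLoss(nn.Module):
    # Hypothetical stand-in: L1 depth error plus a weighted gradient-matching
    # term. Only the DepthLoss(0.1) constructor signature and the
    # loss(pred, target) call pattern are taken from the examples above.
    def __init__(self, grad_weight):
        super().__init__()
        self.grad_weight = grad_weight

    def forward(self, pred, target):
        l1 = torch.mean(torch.abs(pred - target))
        # Difference of horizontal and vertical depth gradients.
        dx = (pred[..., :, 1:] - pred[..., :, :-1]) - (target[..., :, 1:] - target[..., :, :-1])
        dy = (pred[..., 1:, :] - pred[..., :-1, :]) - (target[..., 1:, :] - target[..., :-1, :])
        grad = torch.mean(torch.abs(dx)) + torch.mean(torch.abs(dy))
        return l1 + self.grad_weight * grad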
def evaluate_model(model, loader, device):
    """
    Calculate and return the accuracy (average relative error) of the model on the validation or test set.

    model: the model to evaluate.
    loader: the dataloader of the test or validation set.
    device: either CPU or CUDA.
    """
    model.eval()
    model = model.to(device)
    accuracies = []
    losses = []
    with torch.no_grad():
        for i, batch in enumerate(loader):
            X = torch.Tensor(batch["image"]).to(device)
            y = torch.Tensor(batch["depth"]).to(device)
            outputs = model(X)
            accuracies.append(torch.mean(torch.abs(outputs - y) / y).item())
            loss = DepthLoss(0.1)
            losses.append(loss(outputs, y).item())
    acc = sum(accuracies) / len(accuracies)
    loss = sum(losses) / len(losses)
    print("Evaluation accuracy: {}".format(acc))
    return acc, loss
def train_epoch(device, data_loader, model, criterion, optimizer):
    """
    Train the `model` for one epoch of data from `data_loader`.

    Use `optimizer` to optimize the specified `criterion`
    """
    model.train()
    for i, batch in enumerate(data_loader):
        print("training... batch number", i)
        optimizer.zero_grad()
        X = torch.Tensor(batch["image"]).to(device)
        y = torch.Tensor(batch["depth"]).to(device)
        outputs = model(X)
        # Compute the loss with the criterion passed in, then backpropagate.
        # (The original built a fresh DepthLoss, took .item(), and called
        # .backward() on the module itself, which cannot work.)
        loss = criterion(outputs, y)
        loss.backward()
        optimizer.step()
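A minimal sketch of how train_epoch and evaluate_model fit together, assuming Net, DepthLoss, and loaders that yield {"image": ..., "depth": ...} batches as above; the learning rate is illustrative:

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = Net().to(device)
criterion = DepthLoss(0.1)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
for epoch in range(10):
    train_epoch(device, tr_loader, model, criterion, optimizer)
    va_acc, va_loss = evaluate_model(model, va_loader, device)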
Example #4
def evaluate_final_model(model, loader, device):
    """
    Calculate and return the accuracy (average relative error) of the model on the validation or test set.

    model: the model to evaluate.
    loader: the dataloader of the test or validation set.
    device: either CPU or CUDA.
    """
    model.eval()
    model = model.to(device)
    rel = []
    rms = []
    log10 = []
    theta1 = []
    theta2 = []
    theta3 = []
    losses = []
    with torch.no_grad():
        for i, batch in enumerate(loader):
            X = torch.Tensor(batch["image"]).to(device)
            y = torch.Tensor(batch["depth"]).to(device)
            outputs = model(X)
            rel.append(torch.mean(torch.abs(outputs - y) / y).item())
            rms.append((torch.mean((outputs / y - 1)**2)**0.5).item())
            log10.append(
                torch.mean(torch.abs(torch.log10(outputs) -
                                     torch.log10(y))).item())
            theta1.append(
                (torch.sum(
                    torch.lt(torch.maximum(outputs / y, y / outputs), 1.25)) /
                 y.nelement()).item())
            theta2.append(
                (torch.sum(
                    torch.lt(torch.maximum(outputs / y, y / outputs),
                             1.25 ** 2)) / y.nelement()).item())
            theta3.append(
                (torch.sum(
                    torch.lt(torch.maximum(outputs / y, y / outputs),
                             1.25 ** 3)) / y.nelement()).item())
            loss = DepthLoss(0.1).to(device)
            losses.append(loss(outputs, y).item())
        rel = sum(rel) / len(rel)
        rms = sum(rms) / len(rms)
        log10 = sum(log10) / len(log10)
        theta1 = sum(theta1) / len(theta1)
        theta2 = sum(theta2) / len(theta2)
        theta3 = sum(theta3) / len(theta3)
        loss = sum(losses) / len(losses)
    return rel, rms, log10, theta1, theta2, theta3, loss
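These are the standard monocular-depth metrics: average relative error mean(|pred - y| / y), RMS of the relative deviation, mean absolute log10 error, and threshold accuracies, i.e. the fraction of pixels whose ratio max(pred/y, y/pred) falls below 1.25, 1.25^2, and 1.25^3. A toy check of the threshold metric on made-up values:

pred = torch.tensor([1.0, 2.0, 4.0, 8.0])
truth = torch.tensor([1.1, 2.0, 5.5, 8.2])
ratio = torch.maximum(pred / truth, truth / pred)
theta1 = torch.sum(torch.lt(ratio, 1.25)).item() / truth.nelement()
print(theta1)  # 0.75: three of the four values are within a factor of 1.25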
def main(device=torch.device('cuda:0')):
    # CLI arguments
    parser = arg.ArgumentParser(
        description='We all know what we are doing. Fighting!')
    parser.add_argument("--datasize",
                        "-d",
                        default="small",
                        type=str,
                        help="data size you want to use, small, medium, total")
    # Parsing
    args = parser.parse_args()
    # Data loaders
    datasize = args.datasize
    pathname = "data/nyu.zip"
    tr_loader, va_loader, te_loader = getTrainingValidationTestingData(
        datasize, pathname, batch_size=utils.config("unet.batch_size"))

    # Model
    model = Net()

    # TODO: define loss function, and optimizer
    learning_rate = utils.config("unet.learning_rate")
    criterion = DepthLoss(0.1)
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
    number_of_epoches = 10
    #

    # print("Number of float-valued parameters:", util.count_parameters(model))

    # Attempts to restore the latest checkpoint if exists
    print("Loading unet...")
    model, start_epoch, stats = utils.restore_checkpoint(
        model, utils.config("unet.checkpoint"))

    # axes = utils.make_training_plot()

    # Evaluate the randomly initialized model
    # evaluate_epoch(
    #     axes, tr_loader, va_loader, te_loader, model, criterion, start_epoch, stats
    # )
    # loss = criterion()

    # initial val loss for early stopping
    # prev_val_loss = stats[0][1]

    running_va_loss = []
    running_va_acc = []
    running_tr_loss = []
    running_tr_acc = []
    # TODO: define patience for early stopping
    # patience = 1
    # curr_patience = 0
    #
    tr_acc, tr_loss = utils.evaluate_model(model, tr_loader, device)
    acc, loss = utils.evaluate_model(model, va_loader, device)
    running_va_acc.append(acc)
    running_va_loss.append(loss)
    running_tr_acc.append(tr_acc)
    running_tr_loss.append(tr_loss)

    # Loop over the entire dataset multiple times
    # for epoch in range(start_epoch, config('cnn.num_epochs')):
    epoch = start_epoch
    # while curr_patience < patience:
    while epoch < number_of_epoches:
        # Train model
        utils.train_epoch(tr_loader, model, criterion, optimizer)
        tr_acc, tr_loss = utils.evaluate_model(model, tr_loader, device)
        va_acc, va_loss = utils.evaluate_model(model, va_loader, device)
        running_va_acc.append(va_acc)
        running_va_loss.append(va_loss)
        running_tr_acc.append(tr_acc)
        running_tr_loss.append(tr_loss)
        # Evaluate model
        # evaluate_epoch(
        #     axes, tr_loader, va_loader, te_loader, model, criterion, epoch + 1, stats
        # )

        # Save model parameters
        utils.save_checkpoint(model, epoch + 1,
                              utils.config("unet.checkpoint"), stats)

        # update early stopping parameters
        """
        curr_patience, prev_val_loss = early_stopping(
            stats, curr_patience, prev_val_loss
        )
        """

        epoch += 1
    print("Finished Training")
    # Save figure and keep plot open
    # utils.save_training_plot()
    # utils.hold_training_plot()
    utils.make_plot(running_tr_loss, running_tr_acc, running_va_loss,
                    running_va_acc)
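restore_checkpoint and save_checkpoint come from the project's utils module; only their call sites appear in these examples. A minimal sketch consistent with those call sites; the file-naming scheme and the empty-stats default are assumptions:

import os
import torch

def save_checkpoint(model, epoch, checkpoint_dir, stats):
    # Persist the weights, epoch counter, and running stats.
    os.makedirs(checkpoint_dir, exist_ok=True)
    state = {"epoch": epoch, "state_dict": model.state_dict(), "stats": stats}
    torch.save(state, os.path.join(checkpoint_dir, "epoch={}.pth.tar".format(epoch)))

def restore_checkpoint(model, checkpoint_dir):
    # Load the newest checkpoint if any exist; otherwise start from epoch 0.
    files = os.listdir(checkpoint_dir) if os.path.isdir(checkpoint_dir) else []
    files = [f for f in files if f.endswith(".pth.tar")]
    if not files:
        return model, 0, {}
    latest = max(files, key=lambda f: int(f.split("=")[1].split(".")[0]))
    state = torch.load(os.path.join(checkpoint_dir, latest))
    model.load_state_dict(state["state_dict"])
    return model, state["epoch"], state["stats"]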
Example #6
def main(device, tr_loader, va_loader, te_loader, modelSelection):
    """Train CNN and show training plots."""
    # Model
    if modelSelection.lower() == 'res50':
        model = Res50()
    elif modelSelection.lower() == 'dense121':
        model = Dense121()
    elif modelSelection.lower() == 'dense161':
        model = Dense161()
    elif modelSelection.lower() == 'mobv2':
        model = Mob_v2()
    elif modelSelection.lower() == 'dense169':
        model = Dense169()
    elif modelSelection.lower() == 'mob':
        model = Net()
    elif modelSelection.lower() == 'squeeze':
        model = Squeeze()
    else:
        assert False, 'Wrong type of model selection string!'
    model = model.to(device)

    # TODO: define loss function, and optimizer
    learning_rate = utils.config(modelSelection + ".learning_rate")
    criterion = DepthLoss(0.1).to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
    number_of_epoches = 10
    #

    # Attempts to restore the latest checkpoint if exists
    print("Loading unet...")
    model, start_epoch, stats = utils.restore_checkpoint(
        model, utils.config(modelSelection + ".checkpoint"))

    running_va_loss = [] if 'va_loss' not in stats else stats['va_loss']
    running_va_acc = [] if 'va_err' not in stats else stats['va_err']
    running_tr_loss = [] if 'tr_loss' not in stats else stats['tr_loss']
    running_tr_acc = [] if 'tr_err' not in stats else stats['tr_err']
    tr_acc, tr_loss = utils.evaluate_model(model, tr_loader, device)
    acc, loss = utils.evaluate_model(model, va_loader, device)
    running_va_acc.append(acc)
    running_va_loss.append(loss)
    running_tr_acc.append(tr_acc)
    running_tr_loss.append(tr_loss)
    stats = {
        'va_err': running_va_acc,
        'va_loss': running_va_loss,
        'tr_err': running_tr_acc,
        'tr_loss': running_tr_loss,
    }
    # Loop over the entire dataset multiple times
    # for epoch in range(start_epoch, config('cnn.num_epochs')):
    epoch = start_epoch
    # while curr_patience < patience:
    while epoch < number_of_epoches:
        # Train model
        utils.train_epoch(device, tr_loader, model, criterion, optimizer)
        # Save checkpoint
        utils.save_checkpoint(model, epoch + 1,
                              utils.config(modelSelection + ".checkpoint"),
                              stats)
        # Evaluate model
        tr_acc, tr_loss = utils.evaluate_model(model, tr_loader, device)
        va_acc, va_loss = utils.evaluate_model(model, va_loader, device)
        running_va_acc.append(va_acc)
        running_va_loss.append(va_loss)
        running_tr_acc.append(tr_acc)
        running_tr_loss.append(tr_loss)
        epoch += 1
    print("Finished Training")
    utils.make_plot(running_tr_loss, running_tr_acc, running_va_loss,
                    running_va_acc)
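The modelSelection ladder above recurs in the next example as well; a dictionary dispatch expresses the same mapping more compactly (the model classes are the project's, assumed imported):

MODELS = {
    'res50': Res50,
    'dense121': Dense121,
    'dense161': Dense161,
    'mobv2': Mob_v2,
    'dense169': Dense169,
    'mob': Net,
    'squeeze': Squeeze,
}

def build_model(modelSelection):
    # Same behavior as the if/elif chain, including the failure case.
    if modelSelection.lower() not in MODELS:
        raise ValueError('Wrong model selection string: ' + modelSelection)
    return MODELS[modelSelection.lower()]()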
Example #7
def main(device, tr_loader, va_loader, te_loader, modelSelection):
    """Train CNN and show training plots."""
    # CLI arguments
    # parser = arg.ArgumentParser(description='We all know what we are doing. Fighting!')
    # parser.add_argument("--datasize", "-d", default="small", type=str,
    #                     help="data size you want to use, small, medium, total")
    # Parsing
    # args = parser.parse_args()
    # Data loaders
    # datasize = args.datasize
    # Model
    if modelSelection.lower() == 'res50':
        model = Res50()
    elif modelSelection.lower() == 'dense121':
        model = Dense121()
    elif modelSelection.lower() == 'mobv2':
        model = Mob_v2()
    elif modelSelection.lower() == 'dense169':
        model = Dense169()
    elif modelSelection.lower() == 'mob':
        model = Net()
    elif modelSelection.lower() == 'squeeze':
        model = Squeeze()
    else:
        assert False, 'Wrong type of model selection string!'
    # Model
    # model = Net()
    # model = Squeeze()
    model = model.to(device)

    # TODO: define loss function, and optimizer
    learning_rate = utils.config(modelSelection + ".learning_rate")
    criterion = DepthLoss(0.1).to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
    number_of_epoches = 10
    #

    # Attempts to restore the latest checkpoint if exists
    print("Loading unet...")
    model, start_epoch, stats = utils.restore_checkpoint(
        model, utils.config(modelSelection + ".checkpoint"))

    running_va_loss = [] if 'va_loss' not in stats else stats['va_loss']
    running_va_acc = [] if 'va_err' not in stats else stats['va_err']
    running_tr_loss = [] if 'tr_loss' not in stats else stats['tr_loss']
    running_tr_acc = [] if 'tr_err' not in stats else stats['tr_err']
    tr_acc, tr_loss = utils.evaluate_model(model, tr_loader, device)
    acc, loss = utils.evaluate_model(model, va_loader, device)
    running_va_acc.append(acc)
    running_va_loss.append(loss)
    running_tr_acc.append(tr_acc)
    running_tr_loss.append(tr_loss)
    stats = {
        'va_err': running_va_acc,
        'va_loss': running_va_loss,
        'tr_err': running_tr_acc,
        'tr_loss': running_tr_loss,
        # 'num_of_epoch': 0
    }
    # Loop over the entire dataset multiple times
    # for epoch in range(start_epoch, config('cnn.num_epochs')):
    epoch = start_epoch
    # while curr_patience < patience:
    while epoch < number_of_epoches:
        # Train model
        utils.train_epoch(device, tr_loader, model, criterion, optimizer)
        # Save checkpoint
        utils.save_checkpoint(model, epoch + 1,
                              utils.config(modelSelection + ".checkpoint"),
                              stats)
        # Evaluate model
        tr_acc, tr_loss = utils.evaluate_model(model, tr_loader, device)
        va_acc, va_loss = utils.evaluate_model(model, va_loader, device)
        running_va_acc.append(va_acc)
        running_va_loss.append(va_loss)
        running_tr_acc.append(tr_acc)
        running_tr_loss.append(tr_loss)
        epoch += 1
    print("Finished Training")
    utils.make_plot(running_tr_loss, running_tr_acc, running_va_loss,
                    running_va_acc)
def evaluate_model(model, loader, device, test=False):
    """
    Calculate and return the accuracy (average relative error) of the model on the validation or test set.

    model: the model to evaluate.
    loader: the dataloader of the test or validation set.
    device: either CPU or CUDA.
    test: if True, also compute the RMS, log10, and threshold-accuracy metrics.
    """
    model.eval()
    model = model.to(device)
    accuracies = []
    rms_error = []
    log10_error = []
    losses = []
    threshold_accuracy_1 = []
    threshold_accuracy_2 = []
    threshold_accuracy_3 = []
    delta = 1.25
    with torch.no_grad():
        for i, batch in enumerate(loader):
            X = torch.Tensor(batch["image"]).to(device)
            y = torch.Tensor(batch["depth"]).to(device)
            outputs = model(X)
            accuracies.append(torch.mean(torch.abs(outputs - y) / y).item())
            if test:
                rms_error.append((torch.mean(
                    (outputs / y - 1)**2)**0.5).item())
                log10_error.append(
                    torch.mean(torch.abs(torch.log10(outputs / y))).item())
                threshold_accuracy_1.append(
                    torch.sum(torch.maximum(outputs / y, y /
                                            outputs) < delta).item() /
                    (y.size(0) * y.size(2) * y.size(3)))
                threshold_accuracy_2.append(
                    torch.sum(
                        torch.maximum(outputs / y, y /
                                      outputs) < delta**2).item() /
                    (y.size(0) * y.size(2) * y.size(3)))
                threshold_accuracy_3.append(
                    torch.sum(
                        torch.maximum(outputs / y, y /
                                      outputs) < delta**3).item() /
                    (y.size(0) * y.size(2) * y.size(3)))
            loss = DepthLoss(0.1).to(device)
            losses.append(loss(outputs, y).item())

        acc = sum(accuracies) / len(accuracies)
        loss = sum(losses) / len(losses)
        if test:
            rms = sum(rms_error) / len(rms_error)
            log10_err = sum(log10_error) / len(log10_error)
            threshold_1 = sum(threshold_accuracy_1) / len(threshold_accuracy_1)
            threshold_2 = sum(threshold_accuracy_2) / len(threshold_accuracy_2)
            threshold_3 = sum(threshold_accuracy_3) / len(threshold_accuracy_3)
    print("Evaluation Average Relative Error: {}".format(acc))
    if test:
        print('Evaluation root mean square error: {}'.format(rms))
        print('Evaluation log10 error: {}'.format(log10_err))
        print('Evaluation threshold accuracy (1.25): {}'.format(threshold_1))
        print('Evaluation threshold accuracy (1.25^2): {}'.format(threshold_2))
        print('Evaluation threshold accuracy (1.25^3): {}'.format(threshold_3))
    print("Evaluation Loss: {}".format(loss))
    return acc, loss
    """
    model.eval()
    accuracies = []
    losses = []
    with torch.no_grad:
        for idx, thebatch in enumerate(trainloader):
            batch = torch.Tensor(batch["image"]).to(device)
            depth = torch.Tensor(batch["depth"]).to(device=device)
"""
        for batch, truth in loader:
            batch = batch.to(device)
            truth = truth.to(device)
"""
            pred = model(batch)
            accuracies.append(torch.mean(torch.abs(pred - truth) / truth).item())
            loss = DepthLoss(0.1)
            losses.append(loss(pred, truth).item())
        acc = sum(accuracies) / len(accuracies)
        loss = sum(losses) / len(losses)
        print("Evaluation accuracy: {}".format(acc))
    return acc, loss


def train_epoch(data_loader, model, criterion, optimizer):
    """
    Train the `model` for one epoch of data from `data_loader`.

    Use `optimizer` to optimize the specified `criterion`
    """
    # Mirrors the train_epoch above; this variant takes no device argument,
    # so tensors are created on the CPU alongside the model.
    for i, batch in enumerate(data_loader):
        optimizer.zero_grad()
        X = torch.Tensor(batch["image"])
        y = torch.Tensor(batch["depth"])
        outputs = model(X)
        loss = criterion(outputs, y)
        loss.backward()
        optimizer.step()
Example #10
def main(device=torch.device('cuda:0')):
    """Train CNN and show training plots."""
    # Data loaders
    """
    if check_for_augmented_data("./data"):
        tr_loader, va_loader, te_loader, _ = get_train_val_test_loaders(
            task="target", batch_size=config("cnn.batch_size"), augment=True
        )
    else:
        tr_loader, va_loader, te_loader, _ = get_train_val_test_loaders(
            task="target",
            batch_size=config("cnn.batch_size"),
        )
    """
    # pathname = "data/nyu_depth.zip"
    pathname = "data/nyu_small.zip"
    tr_loader, va_loader, te_loader = getTrainingValidationTestingData(pathname,
                                                                       batch_size=util.config("unet.batch_size"))

    # Model
    model = Net()

    # TODO: define loss function, and optimizer
    learning_rate = util.config("unet.learning_rate")
    criterion = DepthLoss(0.1)
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
    number_of_epoches = 10
    #

    # print("Number of float-valued parameters:", util.count_parameters(model))

    # Attempts to restore the latest checkpoint if exists
    print("Loading unet...")
    model, start_epoch, stats = util.restore_checkpoint(model, util.config("unet.checkpoint"))

    # axes = utils.make_training_plot()

    # Evaluate the randomly initialized model
    # evaluate_epoch(
    #     axes, tr_loader, va_loader, te_loader, model, criterion, start_epoch, stats
    # )
    # loss = criterion()

    # initial val loss for early stopping
    # prev_val_loss = stats[0][1]

    running_va_loss = []
    running_va_acc = []
    running_tr_loss = []
    running_tr_acc = []
    # TODO: define patience for early stopping
    # patience = 1
    # curr_patience = 0
    #
    tr_acc, tr_loss = util.evaluate_model(model, tr_loader, device)
    acc, loss = util.evaluate_model(model, va_loader, device)
    running_va_acc.append(acc)
    running_va_loss.append(loss)
    running_tr_acc.append(tr_acc)
    running_tr_loss.append(tr_loss)

    # Loop over the entire dataset multiple times
    # for epoch in range(start_epoch, config('cnn.num_epochs')):
    epoch = start_epoch
    # while curr_patience < patience:
    while epoch < number_of_epoches:
        # Train model
        util.train_epoch(tr_loader, model, criterion, optimizer)
        tr_acc, tr_loss = util.evaluate_model(model, tr_loader, device)
        va_acc, va_loss = util.evaluate_model(model, va_loader, device)
        running_va_acc.append(va_acc)
        running_va_loss.append(va_loss)
        running_tr_acc.append(tr_acc)
        running_tr_loss.append(tr_loss)
        # Evaluate model
        # evaluate_epoch(
        #     axes, tr_loader, va_loader, te_loader, model, criterion, epoch + 1, stats
        # )

        # Save model parameters
        util.save_checkpoint(model, epoch + 1, util.config("unet.checkpoint"), stats)

        # update early stopping parameters
        """
        curr_patience, prev_val_loss = early_stopping(
            stats, curr_patience, prev_val_loss
        )
        """

        epoch += 1
    print("Finished Training")
    # Save figure and keep plot open
    # utils.save_training_plot()
    # utils.hold_training_plot()
    util.make_plot(running_tr_loss, running_tr_acc, running_va_loss, running_va_acc)
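None of these examples include an entry point; a minimal sketch of how this last main is presumably invoked (it already defaults device to cuda:0):

if __name__ == "__main__":
    # Pass torch.device("cpu") explicitly on machines without a GPU.
    main()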