Example #1
def valid(valid_loader, model, epoch, logger):
    model.eval()  # eval mode (dropout and batchnorm are NOT used)

    losses = AverageMeter()

    # Batches
    for i, (img, alpha_label) in enumerate(valid_loader):
        # Move to GPU, if available
        img = img.type(torch.FloatTensor).to(device)  # [N, 3, 320, 320]
        alpha_label = alpha_label.type(torch.FloatTensor).to(device)  # [N, 2, 320, 320]
        alpha_label = alpha_label.reshape((-1, 2, im_size * im_size))  # [N, 2, 320*320]

        # Forward prop.
        alpha_out = model(img)  # [N, 320, 320]
        alpha_out = alpha_out.reshape((-1, 1, im_size * im_size))  # [N, 1, 320*320]

        # Calculate loss
        # loss = criterion(alpha_out, alpha_label)
        loss = alpha_prediction_loss(alpha_out, alpha_label)

        # Keep track of metrics
        losses.update(loss.item())

        if i % print_freq == 0:
            status = 'Epoch: [{0}][{1}/{2}]\t' \
                     'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(epoch, i, len(valid_loader), loss=losses)
            logger.info(status)
    # Print status
    status = 'Validation: Loss {loss.avg:.4f}\n'.format(loss=losses)

    logger.info(status)

    return losses.avg
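
All of the examples rely on a few module-level names (device, im_size, print_freq, grad_clip) and an AverageMeter utility that are defined elsewhere in the repository. Below is a minimal sketch of an AverageMeter in the style of the PyTorch ImageNet example, assuming it only needs the val/avg fields read by the log format strings:

class AverageMeter(object):
    """Tracks the latest value and the running average of a metric."""

    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0.0   # latest value, read as {loss.val} in the status strings
        self.avg = 0.0   # running average, read as {loss.avg}
        self.sum = 0.0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count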
Example #2
def valid(valid_loader, model, logger):
    model.eval()  # eval mode (dropout and batchnorm are NOT used)

    losses = AverageMeter()

    # Batches
    for img, alpha_label in tqdm(valid_loader):
        # Move to GPU, if available
        img = img.type(torch.FloatTensor).to(device)  # [N, 4, 320, 320]
        alpha_label = alpha_label.type(torch.FloatTensor).to(device)  # [N, 2, 320, 320]
        alpha_label = alpha_label.reshape((-1, 2, im_size * im_size))  # [N, 2, 320*320]

        # Forward prop.
        alpha_out = model(img)  # [N, 320, 320]
        alpha_out = alpha_out.reshape((-1, 1, im_size * im_size))  # [N, 1, 320*320]

        # Calculate loss
        # loss = criterion(alpha_out, alpha_label)
        loss = alpha_prediction_loss(alpha_out, alpha_label)

        # Keep track of metrics
        losses.update(loss.item())

    # Print status
    status = 'Validation: Loss {loss.avg:.4f}\n'.format(loss=losses)
    logger.info(status)

    return losses.avg
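
Every example calls alpha_prediction_loss(alpha_out, alpha_label) without defining it. A plausible sketch, assuming the label's first channel holds the ground-truth alpha and the second channel a 0/1 mask of the trimap's unknown region (a layout assumption, not confirmed by the examples), for inputs flattened to [N, C, 320*320] as above:

import torch

def alpha_prediction_loss(y_pred, y_true, epsilon=1e-6):
    # y_pred: [N, 1, H*W] predicted alpha; y_true: [N, 2, H*W]
    # (assumed) channel 0: ground-truth alpha, channel 1: unknown-region mask
    mask = y_true[:, 1, :]
    diff = (y_pred[:, 0, :] - y_true[:, 0, :]) * mask
    num_pixels = torch.sum(mask)
    # absolute alpha difference with a small epsilon for numerical stability,
    # averaged over the unknown region only
    return torch.sum(torch.sqrt(diff ** 2 + epsilon ** 2)) / (num_pixels + epsilon)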
Example #3
def valid(valid_loader, model, logger, device):
    model.eval()  # eval mode (dropout and batchnorm are NOT used)

    losses = AverageMeter()

    with torch.no_grad():
        para_valid_loader = pl.ParallelLoader(valid_loader, [device]).per_device_loader(device)
        for i, batch in enumerate(para_valid_loader):
            img, alpha_label = batch
    # # Batches
    # for img, alpha_label in valid_loader:
            # Move to GPU, if available
            img = img.type(torch.FloatTensor).to(device)  # [N, 3, 320, 320]
            alpha_label = alpha_label.type(torch.FloatTensor).to(device)  # [N, 2, 320, 320]
            alpha_label = alpha_label.reshape((-1, 2, im_size * im_size))  # [N, 2, 320*320]

            # Forward prop.
            alpha_out = model(img)  # [N, 320, 320]
            alpha_out = alpha_out.reshape((-1, 1, im_size * im_size))  # [N, 1, 320*320]

            # Calculate loss
            # loss = criterion(alpha_out, alpha_label)
            loss = alpha_prediction_loss(alpha_out, alpha_label)

            # Keep track of metrics
            losses.update(loss.item())

    # Print status
    status = 'Validation: Loss {loss.avg:.4f}\n'.format(loss=losses)

    logger.info(status)

    return losses.avg
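
Example #3 is the same validation loop adapted to TPUs with torch_xla's ParallelLoader, which preloads batches onto the listed devices. The imports and device setup it assumes look roughly like this; the final call is hypothetical glue code:

import torch_xla.core.xla_model as xm
import torch_xla.distributed.parallel_loader as pl  # provides pl.ParallelLoader used above

device = xm.xla_device()  # one TPU core exposed as a torch device
model = model.to(device)
val_loss = valid(valid_loader, model, logger, device)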
Example #4
def train(train_loader, model, optimizer, epoch, logger):
    torch.nn.Module.dump_patches = True
    model.train()  # train mode (dropout and batchnorm are used)

    losses = AverageMeter()

    # Batches
    for i, (img, alpha_label) in enumerate(train_loader):
        # Move to GPU, if available
        img = img.type(torch.FloatTensor).to(device)  # [N, 4, 320, 320]
        alpha_label = alpha_label.type(torch.FloatTensor).to(
            device)  # [N, 2, 320, 320]
        alpha_label = alpha_label.reshape(
            (-1, 2, im_size * im_size))  # [N, 2, 320*320]

        # Forward prop.
        alpha_out = model(img)  # [N, 320, 320]
        alpha_out = alpha_out.reshape(
            (-1, 1, im_size * im_size))  # [N, 1, 320*320]

        # Calculate loss
        # loss = criterion(alpha_out, alpha_label)
        loss = alpha_prediction_loss(alpha_out, alpha_label)

        # Back prop.
        optimizer.zero_grad()
        loss.backward()

        # Clip gradients
        clip_gradient(optimizer, grad_clip)

        # Update weights
        optimizer.step()

        # Keep track of metrics
        losses.update(loss.item())

        # Print status

        if i % print_freq == 0:
            status = 'Epoch: [{0}][{1}/{2}]\t' \
                     'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(epoch, i, len(train_loader), loss=losses)
            logger.info(status)

    return losses.avg
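
Example #4's train loop (and Example #5's below) clips gradients with clip_gradient(optimizer, grad_clip) before the optimizer step. A minimal sketch of that helper, assuming element-wise clamping of each parameter's gradient to [-grad_clip, grad_clip]:

def clip_gradient(optimizer, grad_clip):
    """Clamp every gradient held by the optimizer to the range [-grad_clip, grad_clip]."""
    for group in optimizer.param_groups:
        for param in group['params']:
            if param.grad is not None:
                param.grad.data.clamp_(-grad_clip, grad_clip)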
Example #5
def train(train_loader, model, optimizer, epoch, logger):
    model.train()  # train mode (dropout and batchnorm are used)

    losses = AverageMeter()

    # Batches
    for i, (img, alpha_label, image, fg, bg) in enumerate(train_loader):
        # Move to GPU, if available
        img = img.type(torch.FloatTensor).to(device)  # [N, 4, 320, 320]
        alpha_label = alpha_label.type(torch.FloatTensor).to(device)  # [N, 2, 320, 320]
        image = image.type(torch.FloatTensor).to(device)
        fg = fg.type(torch.FloatTensor).to(device)
        bg = bg.type(torch.FloatTensor).to(device)

        # Forward prop.
        alpha_out = model(img)  # [N, 320, 320] 

        # Calculate loss
        # loss = criterion(alpha_out, alpha_label)
        alpha_loss = alpha_prediction_loss(alpha_out, alpha_label)
        comp_loss = composition_loss(alpha_out, alpha_label, image, fg, bg)
        w_l = 0.5
        loss = w_l * alpha_loss + (1 - w_l) * comp_loss

        # Back prop.
        optimizer.zero_grad()
        loss.backward()

        # Clip gradients
        clip_gradient(optimizer, grad_clip)

        # Update weights
        optimizer.step()

        # Keep track of metrics
        losses.update(loss.item())

        # Print status

        if i % print_freq == 0:
            status = 'Epoch: [{0}][{1}/{2}]\t' \
                     'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(epoch, i, len(train_loader), loss=losses)
            logger.info(status)

    return losses.avg
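
Examples #5 and #6 mix the alpha-prediction loss with a composition loss at equal weight (w_l = 0.5). A sketch of a composition loss in the spirit of the Deep Image Matting paper, assuming alpha_out is [N, 320, 320], alpha_label carries the unknown-region mask in channel 1, and image, fg, bg are [N, 3, 320, 320] (all layout assumptions):

import torch

def composition_loss(alpha_out, alpha_label, image, fg, bg, epsilon=1e-6):
    # Re-composite the image from the predicted alpha and compare it to the real image,
    # restricted to the trimap's unknown region (assumed to be alpha_label[:, 1]).
    alpha = alpha_out.unsqueeze(1)            # [N, 1, 320, 320], broadcast over RGB
    mask = alpha_label[:, 1:2, :, :]          # [N, 1, 320, 320]
    comp = alpha * fg + (1.0 - alpha) * bg    # predicted composite, [N, 3, 320, 320]
    diff = (comp - image) * mask
    num_pixels = torch.sum(mask) * 3.0        # the mask broadcasts over the 3 colour channels
    return torch.sum(torch.sqrt(diff ** 2 + epsilon ** 2)) / (num_pixels + epsilon)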
Example #6
def valid(valid_loader, model, epoch, logger):
    model.eval()  # eval mode (dropout and batchnorm are NOT used)

    losses = AverageMeter()

    # Batches
    for i, (img, alpha_label, image, fg, bg) in enumerate(valid_loader):
        # Move to GPU, if available
        img = img.type(torch.FloatTensor).to(device)  # [N, 4, 320, 320]
        alpha_label = alpha_label.type(torch.FloatTensor).to(device)  # [N, 2, 320, 320]
        image = image.type(torch.FloatTensor).to(device)
        fg = fg.type(torch.FloatTensor).to(device)
        bg = bg.type(torch.FloatTensor).to(device)

        # Forward prop.
        alpha_out = model(img)  # [N, 320, 320] 

        # Calculate loss
        # loss = criterion(alpha_out, alpha_label)
        alpha_loss = alpha_prediction_loss(alpha_out, alpha_label)
        comp_loss = composition_loss(alpha_out, alpha_label, image, fg, bg)
        w_l = 0.5
        loss = w_l * alpha_loss + (1 - w_l) * comp_loss

        # Keep track of metrics
        losses.update(loss.item())

        if i % print_freq == 0:
            status = 'Epoch: [{0}][{1}/{2}]\t' \
                     'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(epoch, i, len(valid_loader), loss=losses)
            logger.info(status)
    # Print status
    status = 'Validation: Loss {loss.avg:.4f}\n'.format(loss=losses)

    logger.info(status)

    return losses.avg
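
Taken together, the train/valid pairs fit a standard epoch loop. A hypothetical driver, assuming start_epoch, num_epochs, and a save_checkpoint helper defined elsewhere:

best_loss = float('inf')
for epoch in range(start_epoch, num_epochs):  # start_epoch / num_epochs: assumed config values
    train_loss = train(train_loader, model, optimizer, epoch, logger)
    valid_loss = valid(valid_loader, model, epoch, logger)

    is_best = valid_loss < best_loss
    best_loss = min(valid_loss, best_loss)
    save_checkpoint(epoch, model, optimizer, best_loss, is_best)  # hypothetical helper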