def get_loss_train(model, data_train, criterion, wo_mask=True):
    """
    Compute mean accuracy and mean loss over the training set.

    Args:
        model: network to evaluate (left in eval mode on return).
        data_train: DataLoader yielding (images, masks) batches; tensors are
            moved to the GPU here, so CUDA is required.
        criterion: loss function applied to the model outputs.
        wo_mask: if True, the loss target is the input image center-cropped
            to the output size via ``extract_img`` (autoencoder-style);
            otherwise the ground-truth masks are used.

    Returns:
        Tuple ``(mean_accuracy, mean_loss)`` averaged over the number of
        batches; ``(0.0, 0.0)`` for an empty loader (the original code
        raised NameError on the unbound loop variable in that case).
    """
    model.eval()  # freeze BatchNorm running stats, disable Dropout
    total_acc = 0.0
    total_loss = 0.0
    n_batches = 0
    with torch.no_grad():  # inference only: hoisted out of the loop
        for images, masks in data_train:
            images = images.cuda()
            masks = masks.cuda()
            outputs = model(images)

            if wo_mask:
                # Reconstruction target: crop the input to the output's
                # spatial size (valid-padding networks shrink the image).
                cropped_images = extract_img(outputs.size(2), images)
                loss = criterion(outputs, cropped_images)
            else:
                loss = criterion(outputs, masks)

            # Multi-channel logits -> argmax over classes;
            # single channel -> threshold the logit at 0.
            if outputs.size(1) > 1:
                preds = torch.argmax(outputs, dim=1).float()
            else:
                preds = (outputs > 0).float()

            total_acc += accuracy_check_for_batch(masks.cpu(), preds.cpu(),
                                                  images.size(0))
            total_loss += loss.item()
            n_batches += 1
    if n_batches == 0:
        return 0.0, 0.0
    return total_acc / n_batches, total_loss / n_batches
def validate_model(model, data_val, criterion, epoch, make_prediction=True, save_folder_name='prediction'):
    """
    Run one validation pass and return mean accuracy and mean loss.

    Args:
        model: network to evaluate (left in eval mode on return).
        data_val: DataLoader yielding (images, masks) batches; tensors are
            moved to the GPU here, so CUDA is required.
        criterion: loss function applied to the model outputs.
        epoch: current epoch number, forwarded to ``save_prediction_image``.
        make_prediction: if True, save predicted masks to disk and
            accumulate accuracy; if False, accuracy stays 0 (unchanged
            from the original behavior).
        save_folder_name: output folder for saved prediction images.

    Returns:
        Tuple ``(mean_accuracy, mean_loss)`` averaged over the number of
        batches; ``(0.0, 0.0)`` for an empty loader (the original code
        raised NameError on the unbound loop variable in that case).
    """
    model.eval()  # bug fix: was missing — BatchNorm/Dropout stayed in train mode
    total_val_loss = 0.0
    total_val_acc = 0.0
    n_batches = 0
    for batch, (images_v, masks_v) in enumerate(data_val):
        with torch.no_grad():  # inference only: no gradient tracking
            images_v = images_v.cuda()
            masks_v = masks_v.cuda()
            outputs_v = model(images_v)
            total_val_loss += criterion(outputs_v, masks_v).item()
            # Hard predictions: highest-scoring class per pixel.
            outputs_v = torch.argmax(outputs_v, dim=1).float()
        if make_prediction:
            im_name = batch  # TODO: Change this to real image name so we know
            save_prediction_image(outputs_v, im_name, epoch, save_folder_name)
            total_val_acc += accuracy_check_for_batch(masks_v.cpu(),
                                                      outputs_v.cpu(),
                                                      images_v.size(0))
        n_batches += 1

    if n_batches == 0:
        return 0.0, 0.0
    return total_val_acc / n_batches, total_val_loss / n_batches
def get_loss_train(model, data_train, criterion):
    """
    Evaluate the model over the training set and return the mean
    per-batch accuracy and the mean per-batch loss.
    """
    model.eval()  # eval mode: freeze BatchNorm statistics, disable Dropout
    acc_sum = 0
    loss_sum = 0
    for idx, (imgs, targets) in enumerate(data_train):
        with torch.no_grad():  # pure inference — no gradient tracking
            imgs = Variable(imgs.cuda())
            targets = Variable(targets.cuda())
            logits = model(imgs)
            batch_loss = criterion(logits, targets)
            # Hard prediction: pick the highest-scoring class per pixel.
            hard_preds = torch.argmax(logits, dim=1).float()
            # imgs.size()[0] is the batch size for the accuracy average.
            acc_sum = acc_sum + accuracy_check_for_batch(
                targets.cpu(), hard_preds.cpu(), imgs.size()[0])
            loss_sum = loss_sum + batch_loss.cpu().item()
    return acc_sum / (idx + 1), loss_sum / (idx + 1)
# Exemple #4
# 0
def get_loss_train(model, data_train, criterion):
    """
    Compute mean accuracy and mean loss over the training set (CPU variant).

    Args:
        model: network to evaluate (left in eval mode on return).
        data_train: iterable of (images, masks) batches.
        criterion: loss function applied to the model outputs.

    Returns:
        Tuple ``(mean_accuracy, mean_loss)`` averaged over the number of
        batches; ``(0.0, 0.0)`` for an empty loader (the original code
        raised NameError on the unbound loop variable in that case).
    """
    model.eval()  # freeze BatchNorm running stats, disable Dropout
    total_acc = 0.0
    total_loss = 0.0
    n_batches = 0
    with torch.no_grad():  # inference only: hoisted out of the loop
        for images, masks in data_train:
            # Removed the original's no-op `images.shape` / `masks.shape`
            # debug expressions — they had no effect.
            outputs = model(images)
            loss = criterion(outputs, masks)
            # Hard prediction: highest-scoring class per pixel.
            preds = torch.argmax(outputs, dim=1).float()
            total_acc += accuracy_check_for_batch(masks.cpu(), preds.cpu(),
                                                  images.size(0))
            total_loss += loss.item()
            n_batches += 1
    if n_batches == 0:
        return 0.0, 0.0
    return total_acc / n_batches, total_loss / n_batches