Example #1
0
def train_model(model, train_loader, optimizer, opts):
    """Run one training epoch over ``train_loader``.

    Args:
        model: network mapping a float image batch to class logits.
        train_loader: iterable of ``(image, label)`` batches.
        optimizer: optimizer updating the model parameters.
        opts: options object; reads ``opts.n_classes`` and ``opts.device``.

    Returns:
        Tuple ``(train_loss_epoch, metric_collects)``: the epoch-mean loss
        rounded to 4 decimals, and the multi-class metrics produced by
        ``utils.calc_multi_cls_measures``.
    """
    n_classes = opts.n_classes
    metric = torch.nn.CrossEntropyLoss()

    # np.float / np.int were deprecated aliases removed in NumPy 1.24;
    # use explicit dtypes instead.
    y_probs = np.zeros((0, n_classes), np.float64)
    y_trues = np.zeros((0,), np.int64)
    losses = []
    model.train()

    for image, label in tqdm(train_loader):
        optimizer.zero_grad()
        image, label = utils.move_to([image, label], opts.device)

        # model(...) rather than model.forward(...) so registered hooks run.
        prediction = model(image.float())
        loss = metric(prediction, label.long())
        loss.backward()
        optimizer.step()

        losses.append(loss.item())
        y_prob = F.softmax(prediction, dim=1)

        y_probs = np.concatenate([y_probs, y_prob.detach().cpu().numpy()])
        y_trues = np.concatenate([y_trues, label.cpu().numpy()])

    metric_collects = utils.calc_multi_cls_measures(y_probs, y_trues)

    train_loss_epoch = np.round(np.mean(losses), 4)
    return train_loss_epoch, metric_collects
Example #2
0
def evaluate_model(model, val_loader, n_classes=3):
    """Evaluate ``model`` for one pass over ``val_loader``.

    BatchNorm2d layers are deliberately kept in train mode (their running
    statistics keep updating) while their affine parameters are frozen.

    Args:
        model: network mapping a float image batch to class logits.
        val_loader: iterable of ``(image, label, case_id)``; batch size is
            assumed to be 1 because ``label.item()`` is used below.
        n_classes: number of output classes (default 3, matching the
            previously hard-coded value).

    Returns:
        Tuple ``(val_loss_epoch, metric_collects)``.
    """
    metric = torch.nn.CrossEntropyLoss()
    model.eval()

    for m in model.modules():
        if isinstance(m, nn.BatchNorm2d):
            m.train()
            m.weight.requires_grad = False
            m.bias.requires_grad = False

    # np.float was a deprecated alias removed in NumPy 1.24.
    y_probs = np.zeros((0, n_classes), np.float64)
    losses, y_trues = [], []

    # Inference only: skip autograd bookkeeping to save time and memory.
    with torch.no_grad():
        for image, label, case_id in tqdm(val_loader):
            if torch.cuda.is_available():
                image = image.cuda()
                label = label.cuda()

            prediction = model(image.float())
            loss = metric(prediction, label.long())

            losses.append(loss.item())
            y_prob = F.softmax(prediction, dim=1).cpu().numpy()

            y_probs = np.concatenate([y_probs, y_prob])
            y_trues.append(label.item())

    metric_collects = utils.calc_multi_cls_measures(y_probs, y_trues)
    val_loss_epoch = np.mean(losses)
    return val_loss_epoch, metric_collects
Example #3
0
def test_model(model, test_loader):
    """Evaluate ``model`` for one pass over ``test_loader``.

    Args:
        model: network exposing ``n_classes`` and mapping a float image
            batch to class logits.
        test_loader: iterable of ``(image, label)`` batches.

    Returns:
        Tuple ``(test_loss_epoch, metric_collects)``: the epoch-mean loss
        rounded to 4 decimals, and the multi-class metrics produced by
        ``utils.calc_multi_cls_measures``.
    """
    n_classes = model.n_classes
    metric = torch.nn.CrossEntropyLoss()

    model.eval()

    # np.float / np.int were deprecated aliases removed in NumPy 1.24.
    y_probs = np.zeros((0, n_classes), np.float64)
    y_trues = np.zeros((0,), np.int64)
    losses = []

    # Inference only: skip autograd bookkeeping to save time and memory.
    with torch.no_grad():
        for image, label in test_loader:
            if torch.cuda.is_available():
                image = image.cuda()
                label = label.cuda()

            # model(...) rather than model.forward(...) so hooks run.
            prediction = model(image.float())
            loss = metric(prediction, label.long())

            losses.append(loss.item())
            y_prob = F.softmax(prediction, dim=1)
            y_probs = np.concatenate([y_probs, y_prob.cpu().numpy()])
            y_trues = np.concatenate([y_trues, label.cpu().numpy()])

    metric_collects = utils.calc_multi_cls_measures(y_probs, y_trues)

    test_loss_epoch = np.round(np.mean(losses), 4)
    return test_loss_epoch, metric_collects
Example #4
0
def evaluate_model(model,
                   val_loader,
                   epoch,
                   num_epochs,
                   writer,
                   current_lr,
                   log_every=20):
    """Evaluate ``model`` for one epoch, logging loss and progress.

    BatchNorm2d layers are deliberately kept in train mode (running
    statistics keep updating) while their affine parameters are frozen.

    Args:
        model: network exposing ``n_classes`` and producing class logits.
        val_loader: iterable of ``(image, label, case_id)``; batch size is
            assumed to be 1 because ``label.item()`` is used below.
        epoch: current (0-based) epoch index, used for global step.
        num_epochs: total number of epochs, for progress printing.
        writer: TensorBoard-style summary writer.
        current_lr: learning rate shown in the progress line.
        log_every: print progress every this many batches.

    Returns:
        Tuple ``(val_loss_epoch, metric_collects)``.
    """
    n_classes = model.n_classes
    metric = torch.nn.CrossEntropyLoss()

    model.eval()
    for m in model.modules():
        if isinstance(m, nn.BatchNorm2d):
            m.train()
            m.weight.requires_grad = False
            m.bias.requires_grad = False

    # np.float was a deprecated alias removed in NumPy 1.24.
    y_probs = np.zeros((0, n_classes), np.float64)
    losses, y_trues = [], []

    for i, (image, label, case_id) in enumerate(val_loader):

        if torch.cuda.is_available():
            image = image.cuda()
            label = label.cuda()

        prediction = model(image.float())
        loss = metric(prediction, label.long())

        loss_value = loss.item()
        losses.append(loss_value)
        y_prob = F.softmax(prediction, dim=1)
        y_probs = np.concatenate([y_probs, y_prob.detach().cpu().numpy()])
        y_trues.append(label.item())

        n_iter = epoch * len(val_loader) + i
        writer.add_scalar('Val/Loss', loss_value, n_iter)

        # Only compute running metrics when actually logging them; the
        # original recomputed them over all accumulated data every batch
        # (accidentally quadratic).
        if (i % log_every == 0) and i > 0:
            metric_collects = utils.calc_multi_cls_measures(y_probs, y_trues)
            prefix = '*Val|'
            utils.print_progress(epoch + 1,
                                 num_epochs,
                                 i,
                                 len(val_loader),
                                 np.mean(losses),
                                 current_lr,
                                 metric_collects,
                                 prefix=prefix)

    # Final metrics over the whole epoch (this is what gets returned).
    metric_collects = utils.calc_multi_cls_measures(y_probs, y_trues)
    val_loss_epoch = np.round(np.mean(losses), 4)
    return val_loss_epoch, metric_collects
Example #5
0
def train_model(model, train_loader, epoch, num_epochs, optimizer, writer,
                current_lr, log_every=1, n_classes=3):
    """Run one training epoch, logging loss and progress.

    BatchNorm2d layers are put in train mode but with affine parameters
    frozen (only running statistics update).

    Args:
        model: network producing class logits from a float image batch.
        train_loader: iterable of ``(image, label, case_id)``; batch size is
            assumed to be 1 because ``label.item()`` is used below, and the
            leading batch dim of ``image`` is squeezed away.
        epoch: current (0-based) epoch index, used for global step.
        num_epochs: total number of epochs, for progress printing.
        optimizer: optimizer updating the model parameters.
        writer: TensorBoard-style summary writer.
        current_lr: learning rate shown in the progress line.
        log_every: print progress every this many batches.
        n_classes: number of output classes.

    Returns:
        Tuple ``(train_loss_epoch, metric_collects)``.
    """
    metric = torch.nn.CrossEntropyLoss()

    # np.float was a deprecated alias removed in NumPy 1.24.
    y_probs = np.zeros((0, n_classes), np.float64)
    losses, y_trues = [], []
    model.train()

    for m in model.modules():
        if isinstance(m, nn.BatchNorm2d):
            m.train()
            m.weight.requires_grad = False
            m.bias.requires_grad = False

    for i, (image, label, case_id) in enumerate(train_loader):
        print(f'Starting with batch {i}, case id {case_id}')

        optimizer.zero_grad()
        if torch.cuda.is_available():
            image = image.cuda()
            label = label.cuda()

        # Drop the leading batch dim: the loader yields (1, N, C, H, W)
        # stacks of slices -- presumably; TODO confirm against the loader.
        image = torch.squeeze(image, dim=0)

        prediction = model(image.float())
        loss = metric(prediction, label.long())
        loss.backward()
        optimizer.step()
        print(f'Done with batch {i}')

        loss_value = loss.item()
        losses.append(loss_value)
        y_prob = F.softmax(prediction, dim=1)
        y_probs = np.concatenate([y_probs, y_prob.detach().cpu().numpy()])
        y_trues.append(label.item())

        n_iter = epoch * len(train_loader) + i
        writer.add_scalar('Train/Loss', loss_value, n_iter)

        # Only compute running metrics when actually logging them; the
        # original recomputed them over all accumulated data every batch
        # (accidentally quadratic).
        if (i % log_every == 0) and i > 0:
            metric_collects = utils.calc_multi_cls_measures(y_probs, y_trues)
            utils.print_progress(epoch + 1, num_epochs, i, len(train_loader),
                                 np.mean(losses), current_lr, metric_collects)

    # Final metrics over the whole epoch (this is what gets returned).
    metric_collects = utils.calc_multi_cls_measures(y_probs, y_trues)
    train_loss_epoch = np.round(np.mean(losses), 4)
    return train_loss_epoch, metric_collects
Example #6
0
def train_model(model,
                train_loader,
                epoch,
                num_epochs,
                optimizer,
                current_lr,
                log_every=100):
    """Run one training epoch over ``train_loader``.

    BatchNorm2d layers are put in train mode but with affine parameters
    frozen (only running statistics update).

    Args:
        model: network exposing ``n_classes`` and producing class logits.
        train_loader: iterable of ``(image, label)`` batches.
        epoch: current epoch index (unused here; kept for interface parity).
        num_epochs: total number of epochs (unused here).
        optimizer: optimizer updating the model parameters.
        current_lr: current learning rate (unused here).
        log_every: logging interval (unused here).

    Returns:
        Tuple ``(train_loss_epoch, metric_collects)``.
    """
    n_classes = model.n_classes
    metric = torch.nn.CrossEntropyLoss()

    # np.float was a deprecated alias removed in NumPy 1.24.
    y_probs = np.zeros((0, n_classes), np.float64)
    losses, y_trues = [], []
    model.train()

    for m in model.modules():
        if isinstance(m, nn.BatchNorm2d):
            m.train()
            m.weight.requires_grad = False
            m.bias.requires_grad = False

    for i, (image, label) in enumerate(train_loader):
        optimizer.zero_grad()
        if torch.cuda.is_available():
            image = image.cuda()
            label = label.cuda()
        prediction = model(image.float())
        loss = metric(prediction, label.long())
        loss.backward()
        optimizer.step()

        loss_value = loss.item()
        losses.append(loss_value)
        y_prob = F.softmax(prediction, dim=1)
        y_probs = np.concatenate([y_probs, y_prob.detach().cpu().numpy()])
        # Bug fix: np.array() on a CUDA tensor raises; move to host first.
        label = label.cpu().numpy()
        y_trues = np.append(y_trues, label)

    y_trues = np.array(y_trues)
    metric_collects = utils.calc_multi_cls_measures(y_probs, y_trues)
    train_loss_epoch = np.round(np.mean(losses), 4)

    return train_loss_epoch, metric_collects
Example #7
0
def train_model(model, train_loader, epoch, optimizer, writer, opts):
    """Run one segmentation training epoch and log metrics/images.

    Accumulates per-pixel probabilities and ground-truth masks for the
    whole epoch; assumes 512x512 masks (hard-coded below).

    Args:
        model: segmentation network exposing ``n_classes``; may return an
            ``OrderedDict`` with key ``'out'`` (torchvision style).
        train_loader: iterable of ``(image, mask)`` batches.
        epoch: current epoch index, used as the logging step.
        optimizer: optimizer updating the model parameters.
        writer: TensorBoard-style summary writer.
        opts: options object (currently unused; kept for interface parity).

    Returns:
        Tuple ``(epoch_loss, metric_collects)`` where ``epoch_loss`` is the
        summed batch loss.
    """
    n_classes = model.n_classes
    metric = nn.CrossEntropyLoss()

    y_probs = torch.zeros(0, n_classes, 512, 512)
    y_trues = torch.zeros(0, 512, 512).long()
    epoch_loss = 0
    model.train()

    for i, (image, mask) in enumerate(train_loader):
        optimizer.zero_grad()

        if torch.cuda.is_available():
            image = image.cuda()
            mask = mask.cuda()

        prediction = model(image)

        # For the torchvision models, an OrderedDict is returned
        if isinstance(prediction, OrderedDict):
            prediction = prediction['out']

        loss = metric(prediction, mask)
        loss.backward()
        optimizer.step()

        epoch_loss += loss.item()

        # Detach and move to host once; the original did .cpu() before the
        # softmax and then .detach().cpu() again (redundant copy).
        y_prob = F.softmax(prediction.detach().cpu(), dim=1)
        y_probs = torch.cat([y_probs, y_prob])
        y_trues = torch.cat([y_trues, mask.cpu()])

    metric_collects = utils.calc_multi_cls_measures(y_probs, y_trues)
    utils.write_img(y_probs, y_trues, epoch, writer, is_train=True)

    writer.add_scalar('Training loss', epoch_loss, epoch)
    writer.add_scalar('Training accuracy', metric_collects['accuracy'], epoch)
    writer.add_scalar('Training miou', metric_collects['miou'], epoch)

    return epoch_loss, metric_collects
Example #8
0
def evaluate_model(model, val_loader, epoch, writer, opts):
    """Evaluate a segmentation model for one pass and log metrics/images.

    Accumulates per-pixel probabilities and ground-truth masks for the
    whole pass; assumes 512x512 masks (hard-coded below).

    Args:
        model: segmentation network exposing ``n_classes``; may return an
            ``OrderedDict`` with key ``'out'`` (torchvision style).
        val_loader: iterable of ``(image, mask)`` batches.
        epoch: current epoch index, used as the logging step.
        writer: TensorBoard-style summary writer.
        opts: options object (currently unused; kept for interface parity).

    Returns:
        Tuple ``(epoch_loss, metric_collects)`` where ``epoch_loss`` is the
        summed batch loss.
    """
    n_classes = model.n_classes
    metric = torch.nn.CrossEntropyLoss()

    model.eval()

    y_probs = torch.zeros(0, n_classes, 512, 512)
    y_trues = torch.zeros(0, 512, 512).long()
    epoch_loss = 0

    # Inference only: skip autograd bookkeeping to save time and memory.
    with torch.no_grad():
        for i, (image, mask) in enumerate(val_loader):

            if torch.cuda.is_available():
                image = image.cuda()
                mask = mask.cuda()

            prediction = model(image)

            # For the torchvision models, an OrderedDict is returned
            if isinstance(prediction, OrderedDict):
                prediction = prediction['out']

            loss = metric(prediction, mask)

            epoch_loss += loss.item()

            y_prob = F.softmax(prediction, dim=1)
            y_probs = torch.cat([y_probs, y_prob.cpu()])
            y_trues = torch.cat([y_trues, mask.cpu()])

    metric_collects = utils.calc_multi_cls_measures(y_probs, y_trues)
    utils.write_img(y_probs, y_trues, epoch, writer, is_train=False)

    writer.add_scalar('Validation loss', epoch_loss, epoch)
    writer.add_scalar('Validation accuracy', metric_collects['accuracy'],
                      epoch)
    writer.add_scalar('Validation miou', metric_collects['miou'], epoch)

    return epoch_loss, metric_collects