Example 1
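All of the snippets below assume roughly the following imports; helpers such as move_to, calc_cls_measures, patchGrid, and mapGrid are project-specific and are not shown here.

# common imports assumed by the examples below (a best guess,
# not taken verbatim from the original sources)
import math
import sys

import numpy as np
import torch
import torch.nn.functional as F
from tqdm import tqdm
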
def train(model, optimizer, train_loader, criterion, entropy_loss_func, opts):
    """ Train for a single epoch """

    y_probs = np.zeros((0, len(train_loader.dataset.CLASSES)), dtype=np.float64)
    y_trues = np.zeros((0,), dtype=np.int64)
    losses = []

    # Put model in training mode
    model.train()

    for i, (x_low, x_high, label) in enumerate(tqdm(train_loader)):
        x_low, x_high, label = move_to([x_low, x_high, label], opts.device)

        optimizer.zero_grad()
        y, attention_map, patches, x_low = model(x_low, x_high)

        entropy_loss = entropy_loss_func(attention_map)

        # subtracting the entropy term rewards a higher-entropy (less peaked)
        # attention map, assuming entropy_loss_func returns attention entropy
        loss = criterion(y, label) - entropy_loss
        loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), opts.clipnorm)
        optimizer.step()

        loss_value = loss.item()
        losses.append(loss_value)

        y_prob = F.softmax(y, dim=1)
        y_probs = np.concatenate([y_probs, y_prob.detach().cpu().numpy()])
        y_trues = np.concatenate([y_trues, label.cpu().numpy()])

    train_loss_epoch = np.round(np.mean(losses), 4)
    metrics = calc_cls_measures(y_probs, y_trues)
    return train_loss_epoch, metrics
Example 2
@torch.no_grad()
def evaluate(model, test_loader, criterion, entropy_loss_func, opts):
    """ Evaluate a single epoch """

    y_probs = np.zeros((0, len(test_loader.dataset.CLASSES)), dtype=np.float64)
    y_trues = np.zeros((0,), dtype=np.int64)
    losses = []

    # Put model in eval mode
    model.eval()

    for i, (x_low, x_high, label) in enumerate(tqdm(test_loader)):

        x_low, x_high, label = move_to([x_low, x_high, label], opts.device)

        y, attention_map, patches, x_low = model(x_low, x_high)

        entropy_loss = entropy_loss_func(attention_map)
        loss = criterion(y, label) - entropy_loss

        loss_value = loss.item()
        losses.append(loss_value)

        y_prob = F.softmax(y, dim=1)
        y_probs = np.concatenate([y_probs, y_prob.detach().cpu().numpy()])
        y_trues = np.concatenate([y_trues, label.cpu().numpy()])

    test_loss_epoch = np.round(np.mean(losses), 4)
    metrics = calc_cls_measures(y_probs, y_trues)
    return test_loss_epoch, metrics
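
The entropy_loss_func passed into these functions is project-specific; a minimal sketch of a plausible implementation, assuming the attention map is a per-sample distribution over spatial locations (the real project may differ):

def entropy_loss_func(attention_map, eps=1e-8):
    """Mean per-sample entropy of a (B, ...) attention distribution."""
    p = attention_map.flatten(start_dim=1)          # (B, N), rows sum to 1
    entropy = -(p * torch.log(p + eps)).sum(dim=1)  # entropy per sample
    return entropy.mean()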
Example 3
def train_one_epoch(model, criterion, data_loader, optimiser, device):
    """Trains an input PyTorch model for one epoch

    Args:
        model (torch.nn.Module): pytorch model
        criterion (torch.nn): loss function
        data_loader (torch.utils.data.DataLoader): training data loader
        optimiser (torch.optim): optimiser
        device (torch.device): which device to train the model on

    Returns:
        float, dict: training loss for one epoch and a dictionary of training metrics calculated for that epoch
    """
    y_probs = np.zeros((0, len(data_loader.dataset.CLASSES)), dtype=np.float64)
    y_trues = np.zeros((0,), dtype=np.int64)
    losses = []
    model.train()

    for i, (x, labels) in enumerate(data_loader):
        x = x.to(device, non_blocking=True)
        labels = labels.to(device, non_blocking=True)

        # zero parameter gradients
        optimiser.zero_grad(set_to_none=True)

        # compute forward, backward pass and optimise
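        # note: autocast is normally paired with torch.cuda.amp.GradScaler so
        # that fp16 gradients do not underflow; this snippet calls backward()
        # on the unscaled loss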
        with torch.cuda.amp.autocast():
            outputs = model(x)
            loss = criterion(outputs, labels)

        loss.backward()
        optimiser.step()
        loss_value = loss.item()

        if not math.isfinite(loss_value):
            print(f"Loss is {loss_value}, stopping training")
            sys.exit(1)

        losses.append(loss_value)

        y_prob = F.softmax(outputs, dim=1)
        y_probs = np.concatenate([y_probs, y_prob.detach().cpu().numpy()])
        y_trues = np.concatenate([y_trues, labels.cpu().numpy()])

    train_loss_epoch = np.round(np.mean(losses), 4)
    metrics = calc_cls_measures(y_probs, y_trues)

    return train_loss_epoch, metrics
Example 4
@torch.no_grad()
def evaluate(data_loader, model, device):
    """Evaluates a PyTorch model on a validation dataset

    Args:
        data_loader (torch.utils.data.DataLoader): validation data loader
        model (torch.nn.Module): PyTorch model to evaluate
        device (torch.device): device to run function on

    Returns:
        float, dict: validation loss and a dict of validation metrics after one epoch
    """
    criterion = torch.nn.CrossEntropyLoss()

    y_probs = np.zeros((0, len(data_loader.dataset.CLASSES)), dtype=np.float64)
    y_trues = np.zeros((0,), dtype=np.int64)
    losses = []

    # switch to evaluation mode
    model.eval()

    for i, (x, labels) in enumerate(data_loader):
        x = x.to(device, non_blocking=True)
        labels = labels.to(device, non_blocking=True)

        # compute output
        with torch.cuda.amp.autocast():
            outputs = model(x)
            loss = criterion(outputs, labels)

        loss_value = loss.item()
        losses.append(loss_value)

        y_prob = F.softmax(outputs, dim=1)
        y_probs = np.concatenate([y_probs, y_prob.detach().cpu().numpy()])
        y_trues = np.concatenate([y_trues, labels.cpu().numpy()])

    val_loss_epoch = np.round(np.mean(losses), 4)
    metrics = calc_cls_measures(y_probs, y_trues)
    return val_loss_epoch, metrics
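
For context, a minimal sketch of how the train_one_epoch / evaluate pair from Examples 3 and 4 might be driven; the model class, loaders, and hyperparameters here are hypothetical:

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = MyClassifier().to(device)      # hypothetical model class
criterion = torch.nn.CrossEntropyLoss()
optimiser = torch.optim.AdamW(model.parameters(), lr=1e-4)
# train_loader / val_loader assumed built elsewhere

for epoch in range(10):
    train_loss, train_metrics = train_one_epoch(
        model, criterion, train_loader, optimiser, device)
    val_loss, val_metrics = evaluate(val_loader, model, device)
    print(f"epoch {epoch}: train loss {train_loss}, val loss {val_loss}")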
Example 5
@torch.no_grad()
def evaluateMultiResBatches(model, test_loader, criterion, entropy_loss_func,
                            opts):
    """ Evaluate for a single epoch """

    y_probs = np.zeros((0, len(test_loader.dataset.CLASSES)), dtype=np.float64)
    y_trues = np.zeros((0,), dtype=np.int64)
    losses = [[] for _ in opts.scales]
    metrics = []
    # Put model in eval mode
    model.eval()

    all_patches = []
    all_maps = []
    all_x_low = []
    all_sampled_ats = []
    for i, (x_low, x_high, label) in enumerate(tqdm(test_loader)):
        # high res batch
        x_low, x_high, label = move_to([x_low, x_high, label], opts.device)

        y, attention_map, patches, x_low_out, sampled_attention = model(
            x_low, x_high)
        if opts.visualize:
            all_patches.append(patches)
            all_maps.append(attention_map)
            all_x_low.append(x_low_out)
            all_sampled_ats.append(sampled_attention)
        entropy_loss = entropy_loss_func(attention_map)

        loss = criterion(y, label) - entropy_loss

        loss_value = loss.item()
        losses[0].append(loss_value)

        y_prob = F.softmax(y, dim=1)
        y_probs = np.concatenate([y_probs, y_prob.detach().cpu().numpy()])
        y_trues = np.concatenate([y_trues, label.cpu().numpy()])

        metric = calc_cls_measures(y_probs, y_trues)
        metrics.append(metric)

        # re-run the model at each additional, lower-resolution scale
        for scale_idx in range(1, len(opts.scales)):
            s = opts.scales[scale_idx]
            x_low_i = F.interpolate(x_low, scale_factor=s, mode='bilinear')
            x_high_i = F.interpolate(x_high, scale_factor=s, mode='bilinear')

            x_low_i, x_high_i = move_to([x_low_i, x_high_i], opts.device)

            y, attention_map, patches, x_low_i_out, sampled_attention = model(
                x_low_i, x_high_i)

            if opts.visualize:
                all_patches.append(patches)
                all_maps.append(attention_map)
                all_x_low.append(x_low_i_out)
                all_sampled_ats.append(sampled_attention)
            entropy_loss = entropy_loss_func(attention_map)

            loss = criterion(y, label) - entropy_loss

            loss_value = loss.item()

            losses[scale_idx].append(loss_value)

            y_prob = F.softmax(y, dim=1)
            y_probs = np.concatenate([y_probs, y_prob.detach().cpu().numpy()])
            y_trues = np.concatenate([y_trues, label.cpu().numpy()])

            metric = calc_cls_measures(y_probs, y_trues)
            metrics.append(metric)

        if opts.visualize:
            all_patches_tensor = torch.cat(all_patches, dim=1)
            # all_maps_tensor = torch.stack(all_maps, dim=1)
            for b in range(patches.shape[0]):
                batch_patches = all_patches_tensor[b]
                batch_maps = [
                    attention_map[b].cpu().numpy()
                    for attention_map in all_maps
                ]
                # debug output: raw attention values per scale
                for ats in batch_maps:
                    print(ats)
                batch_imgs = [x_low_i[b] for x_low_i in all_x_low]
                batch_sampled_ats = [
                    sampled_attention[b].cpu().numpy()
                    for sampled_attention in all_sampled_ats
                ]
                print(batch_sampled_ats)
                patchGrid(batch_patches, batch_maps, batch_imgs, (3, 5))
                # mapGrid(batch_maps, batch_imgs, opts.scales)

    test_loss_epoch = [np.round(np.mean(loss_s), 4) for loss_s in losses]
    return test_loss_epoch, metrics
Example 6
def trainMultiResBatches(model, optimizer, train_loader, criterion,
                         entropy_loss_func, opts):
    """ Train for a single epoch """

    y_probs = np.zeros((0, len(train_loader.dataset.CLASSES)), dtype=np.float64)
    y_trues = np.zeros((0,), dtype=np.int64)
    losses = [[] for _ in opts.scales]
    metrics = []
    # Put model in training mode
    model.train()

    for i, (x_low, x_high, label) in enumerate(tqdm(train_loader)):
        # high res batch
        x_low, x_high, label = move_to([x_low, x_high, label], opts.device)

        optimizer.zero_grad()
        y, attention_map, patches, x_low_out = model(x_low, x_high)

        entropy_loss = entropy_loss_func(attention_map)

        loss = criterion(y, label) - entropy_loss
        loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), opts.clipnorm)
        optimizer.step()

        loss_value = loss.item()
        losses[0].append(loss_value)

        y_prob = F.softmax(y, dim=1)
        y_probs = np.concatenate([y_probs, y_prob.detach().cpu().numpy()])
        y_trues = np.concatenate([y_trues, label.cpu().numpy()])

        metric = calc_cls_measures(y_probs, y_trues)
        metrics.append(metric)

        # repeat the forward/backward pass and the parameter update at each
        # additional, lower-resolution scale
        for scale_idx in range(1, len(opts.scales)):
            s = opts.scales[scale_idx]
            x_low_i = F.interpolate(x_low, scale_factor=s, mode='bilinear')
            x_high_i = F.interpolate(x_high, scale_factor=s, mode='bilinear')

            x_low_i, x_high_i = move_to([x_low_i, x_high_i], opts.device)

            optimizer.zero_grad()
            y, attention_map, patches, x_low_i_out = model(x_low_i, x_high_i)

            entropy_loss = entropy_loss_func(attention_map)

            loss = criterion(y, label) - entropy_loss

            loss.backward()

            torch.nn.utils.clip_grad_norm_(model.parameters(), opts.clipnorm)

            optimizer.step()

            loss_value = loss.item()

            losses[scale_idx].append(loss_value)

            y_prob = F.softmax(y, dim=1)
            y_probs = np.concatenate([y_probs, y_prob.detach().cpu().numpy()])
            y_trues = np.concatenate([y_trues, label.cpu().numpy()])

            metric = calc_cls_measures(y_probs, y_trues)
            metrics.append(metric)

    train_loss_epoch = [np.round(np.mean(loss_s), 4) for loss_s in losses]
    return train_loss_epoch, metrics
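
The multi-resolution variants above expect an opts object carrying at least device, scales, clipnorm, and visualize; a minimal sketch with hypothetical values:

from types import SimpleNamespace

# field names are taken from the functions above; the values are hypothetical
opts = SimpleNamespace(
    device=torch.device("cuda" if torch.cuda.is_available() else "cpu"),
    scales=[1.0, 0.75, 0.5],   # first entry is treated as the native scale
    clipnorm=5.0,
    visualize=False,
)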
Example 7
@torch.no_grad()
def evaluateMultiRes(model, test_loader, criterion, entropy_loss_func, opts):
    """ Evaluate a single epoch """

    y_probs = np.zeros((0, len(test_loader.dataset.CLASSES)), dtype=np.float64)
    y_trues = np.zeros((0,), dtype=np.int64)
    losses = []

    # Put model in eval mode
    model.eval()

    for i, (x_lows, x_highs, label) in enumerate(tqdm(test_loader)):

        x_lows, x_highs, label = move_to([x_lows, x_highs, label], opts.device)

        y, attention_maps, patches, x_lows = model(x_lows, x_highs)

        if isinstance(attention_maps, list):
            # average the entropy term over all scales
            entropy_loss = torch.stack([
                entropy_loss_func(attention_map)
                for attention_map in attention_maps
            ]).sum() / len(opts.scales)
        else:
            entropy_loss = entropy_loss_func(attention_maps)
        loss = criterion(y, label) - entropy_loss

        loss_value = loss.item()
        losses.append(loss_value)

        y_prob = F.softmax(y, dim=1)
        y_probs = np.concatenate([y_probs, y_prob.detach().cpu().numpy()])
        y_trues = np.concatenate([y_trues, label.cpu().numpy()])

        if opts.visualize:
            for b in range(patches.shape[0]):
                batch_patches = patches[b]
                if isinstance(attention_maps, list):
                    batch_maps = [
                        attention_map[b].cpu().numpy()
                        for attention_map in attention_maps
                    ]
                    # debug output: value range of each attention map
                    for attention_map in batch_maps:
                        print(np.max(attention_map))
                        print(np.min(attention_map))
                else:
                    batch_maps = [attention_maps[b].cpu().numpy()]
                batch_imgs = [x_lows[i][b] for i in range(len(model.scales))]
                patchGrid(batch_patches, batch_maps, batch_imgs, (3, 5))

    test_loss_epoch = np.round(np.mean(losses), 4)
    metrics = calc_cls_measures(y_probs, y_trues)
    return test_loss_epoch, metrics