Code example #1
from tqdm import tqdm

# Metrics is assumed to come from the project's own metrics module (a minimal
# sketch of a compatible class follows this example).


def train(loader, num_classes, device, net, optimizer, criterion):
    num_samples = 0
    running_loss = 0

    metrics = Metrics()

    net.train()

    for images, masks, _tile in tqdm(loader,
                                     desc="Train",
                                     unit="batch",
                                     ascii=True):

        images = images.to(device)
        masks = masks.to(device)

        assert images.size()[2:] == masks.size()[1:], \
            "resolutions for images and masks are in sync"

        num_samples += int(images.size(0))

        optimizer.zero_grad()
        outputs = net(images)

        assert outputs.size()[2:] == masks.size()[1:], \
            "resolutions for predictions and masks are in sync"
        assert outputs.size(1) == num_classes, \
            "classes for predictions and dataset are in sync"

        loss = criterion(outputs, masks)
        loss.backward()

        optimizer.step()

        running_loss += loss.item()

        for mask, output in zip(masks, outputs):
            prediction = output.detach()
            metrics.add(mask, prediction)

    assert num_samples > 0, "dataset contains training images and labels"

    class_stats = metrics.get_classification_stats()

    return {
        "loss": running_loss / num_samples,
        "miou": metrics.get_miou(),
        "fg_iou": metrics.get_fg_iou(),
        "mcc": metrics.get_mcc(),
        "accuracy": class_stats['accuracy'],
        "precision": class_stats['precision'],
        "recall": class_stats['recall'],
        "f1": class_stats['f1']
    }
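
Every example in this section leans on a Metrics helper defined elsewhere in the project. Below is a minimal sketch of a compatible class, assuming it accumulates a binary (foreground vs. background) confusion matrix and that masks encode background as 0 and foreground as 1; the method names mirror the calls in the examples, but the real implementation may differ.

import math

import torch


class Metrics:
    """Accumulates binary segmentation statistics (foreground vs. background)."""

    def __init__(self):
        self.tp = self.fp = self.fn = self.tn = 0

    def add(self, mask, prediction, is_prob=True):
        """Adds one mask/prediction pair; argmaxes class scores if is_prob."""
        if is_prob:
            prediction = prediction.argmax(dim=0)

        mask = mask.reshape(-1).long()
        prediction = prediction.reshape(-1).long()

        self.tp += torch.sum((mask == 1) & (prediction == 1)).item()
        self.fp += torch.sum((mask == 0) & (prediction == 1)).item()
        self.fn += torch.sum((mask == 1) & (prediction == 0)).item()
        self.tn += torch.sum((mask == 0) & (prediction == 0)).item()

    def get_fg_iou(self):
        try:
            return self.tp / (self.tp + self.fp + self.fn)
        except ZeroDivisionError:
            return float("nan")

    def get_miou(self):
        try:
            bg_iou = self.tn / (self.tn + self.fp + self.fn)
        except ZeroDivisionError:
            bg_iou = float("nan")
        return (self.get_fg_iou() + bg_iou) / 2

    def get_mcc(self):
        den = math.sqrt((self.tp + self.fp) * (self.tp + self.fn) *
                        (self.tn + self.fp) * (self.tn + self.fn))
        num = self.tp * self.tn - self.fp * self.fn
        return num / den if den else float("nan")

    def get_classification_stats(self):
        total = self.tp + self.fp + self.fn + self.tn
        precision = self.tp / (self.tp + self.fp) if self.tp + self.fp else 0.0
        recall = self.tp / (self.tp + self.fn) if self.tp + self.fn else 0.0
        f1 = (2 * precision * recall / (precision + recall)
              if precision + recall else 0.0)
        return {
            "accuracy": (self.tp + self.tn) / total if total else 0.0,
            "precision": precision,
            "recall": recall,
            "f1": f1,
        }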
Code example #2
from tqdm import tqdm

# Note: the nn parameter below is the network module itself; the name shadows
# the common torch.nn alias.


def train(loader, config, log, device, nn, optimizer, criterion):

    num_samples = 0
    running_loss = 0

    metrics = Metrics()
    nn.train()

    for images, masks, tiles in tqdm(loader,
                                     desc="Train",
                                     unit="batch",
                                     ascii=True):
        images = images.to(device)
        masks = masks.to(device)

        assert images.size()[2:] == masks.size()[1:], \
            "resolutions for images and masks are in sync"
        num_samples += int(images.size(0))

        optimizer.zero_grad()
        outputs = nn(images)

        assert outputs.size()[2:] == masks.size()[1:], \
            "resolutions for predictions and masks are in sync"
        assert outputs.size(1) == len(config["classes"]), \
            "classes for predictions and dataset are in sync"

        loss = criterion(outputs, masks, config)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()

        for mask, output in zip(masks, outputs):
            prediction = output.detach()
            metrics.add(mask, prediction)

    assert num_samples > 0, "dataset contains training images and labels"

    log.log("{}{:.3f}".format("Loss:".ljust(25, " "),
                              running_loss / num_samples))
    # Binary-centric: with a single foreground class this prints its IoU;
    # Metrics only tracks foreground-vs-background statistics here.
    for classe in config["classes"][1:]:
        log.log("{}{:.3f}".format((classe["title"] + " IoU:").ljust(25, " "),
                                  metrics.get_fg_iou()))
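
Examples #1 and #2 are two variants of the same training loop. Example #1 takes an explicit num_classes and returns a dict of aggregate metrics for the caller to log or checkpoint on; example #2 instead reads the class list from a config dict, passes that config through to the criterion, and reports loss and per-class IoU through a log object. The first suits programmatic consumers, the second config-driven command-line tooling.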
Code example #3
import math
import os

import numpy as np
import torch
from PIL import Image


def compare(masks, labels, tile, classes):

    x, y, z = list(map(str, tile))
    label = np.array(Image.open(os.path.join(labels, z, x, "{}.png".format(y))))
    mask = np.array(Image.open(os.path.join(masks, z, x, "{}.png".format(y))))

    assert label.shape == mask.shape
    assert len(label.shape) == 2 and len(classes) == 2  # Still binary centric

    metrics = Metrics()
    metrics.add(torch.from_numpy(label), torch.from_numpy(mask), is_prob=False)
    fg_iou = metrics.get_fg_iou()

    fg_ratio = 100 * max(np.sum(mask != 0), np.sum(label != 0)) / mask.size
    dist = 0.0 if math.isnan(fg_iou) else 1.0 - fg_iou

    qod = 100 - (dist * (math.log(fg_ratio + 1.0) + np.finfo(float).eps) *
                 (100 / math.log(100)))
    qod = 0.0 if qod < 0.0 else qod  # Corner case prophylaxis

    return dist, fg_ratio, qod
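
The qod value in example #3 is a quality-of-data score on a 0-100 scale: dist is the foreground-IoU distance between mask and label, and the log(fg_ratio + 1.0) term weights tiles with more foreground more heavily, with 100 / math.log(100) normalizing so a fully wrong, fully foreground tile lands near 0 before the clamp. A quick numeric check of the formula, with illustrative values:

import math

dist, fg_ratio = 0.5, 10.0  # half the foreground IoU lost, 10% foreground
eps = 2.220446049250313e-16  # np.finfo(float).eps
qod = 100 - (dist * (math.log(fg_ratio + 1.0) + eps) * (100 / math.log(100)))
print(round(qod, 1))  # ~74.0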
Code example #4
import torch
from tqdm import tqdm


def validate(loader, config, log, device, nn, criterion):

    num_samples = 0
    running_loss = 0

    metrics = Metrics()
    nn.eval()

    with torch.no_grad():
        for images, masks, tiles in tqdm(loader,
                                         desc="Validate",
                                         unit="batch",
                                         ascii=True):
            images = images.to(device)
            masks = masks.to(device)

            assert images.size()[2:] == masks.size()[1:], \
                "resolutions for images and masks are in sync"
            num_samples += int(images.size(0))

            outputs = nn(images)

            assert outputs.size()[2:] == masks.size()[1:], \
                "resolutions for predictions and masks are in sync"
            assert outputs.size(1) == len(config["classes"]), \
                "classes for predictions and dataset are in sync"

            loss = criterion(outputs, masks, config)
            running_loss += loss.item()

            for mask, output in zip(masks, outputs):
                metrics.add(mask, output)

    assert num_samples > 0, "dataset contains validation images and labels"

    log.log("{}{:.3f}".format("Loss:".ljust(25, " "),
                              running_loss / num_samples))
    # Binary-centric: with a single foreground class this prints its IoU;
    # Metrics only tracks foreground-vs-background statistics here.
    for classe in config["classes"][1:]:
        log.log("{}{:.3f}".format((classe["title"] + " IoU:").ljust(25, " "),
                                  metrics.get_fg_iou()))
Code example #5
import torch
from tqdm import tqdm


def validate(loader, num_classes, device, net, criterion):

    num_samples = 0
    running_loss = 0

    metrics = Metrics()
    net.eval()

    with torch.no_grad():
        for images, masks, _tile in tqdm(loader,
                                         desc="Validate",
                                         unit="batch",
                                         ascii=True):
            images = images.to(device)
            masks = masks.to(device)

            assert images.size()[2:] == masks.size()[1:], \
                "resolutions for images and masks are in sync"

            num_samples += int(images.size(0))
            outputs = net(images)

            assert outputs.size()[2:] == masks.size()[1:], \
                "resolutions for predictions and masks are in sync"
            assert outputs.size(1) == num_classes, \
                "classes for predictions and dataset are in sync"

            loss = criterion(outputs, masks)
            running_loss += loss.item()

            for mask, output in zip(masks, outputs):
                metrics.add(mask, output)

    assert num_samples > 0, "dataset contains validation images and labels"

    class_stats = metrics.get_classification_stats()

    return {
        "loss": running_loss / num_samples,
        "miou": metrics.get_miou(),
        "fg_iou": metrics.get_fg_iou(),
        "mcc": metrics.get_mcc(),
        "accuracy": class_stats['accuracy'],
        "precision": class_stats['precision'],
        "recall": class_stats['recall'],
        "f1": class_stats['f1']
    }
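
Finally, a hypothetical driver tying example #1's train() to example #5's validate(); the model, loaders, criterion, and checkpoint filename here are placeholders, not part of the original snippets.

import torch


def run(train_loader, val_loader, num_classes, net, criterion, num_epochs=10):
    # Hypothetical glue code; assumes train() and validate() from examples
    # #1 and #5 are importable in this scope.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    net = net.to(device)
    optimizer = torch.optim.Adam(net.parameters(), lr=1e-4)

    best_miou = 0.0
    for epoch in range(num_epochs):
        train_stats = train(train_loader, num_classes, device, net,
                            optimizer, criterion)
        val_stats = validate(val_loader, num_classes, device, net, criterion)

        print("epoch {}: train loss {:.3f}, val miou {:.3f}".format(
            epoch + 1, train_stats["loss"], val_stats["miou"]))

        if val_stats["miou"] > best_miou:  # checkpoint on best validation mIoU
            best_miou = val_stats["miou"]
            torch.save(net.state_dict(), "checkpoint-best.pth")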