Example #1
    def net_from_chkpt_(self):
        def map_location(storage, _):
            return storage.cuda() if self.cuda else storage.cpu()

        # https://github.com/pytorch/pytorch/issues/7178
        chkpt = torch.load(self.checkpoint, map_location=map_location)

        num_classes = len(self.dataset['common']['classes'])

        net = UNet(num_classes).to(self.device)
        net = nn.DataParallel(net)

        if self.cuda:
            torch.backends.cudnn.benchmark = True

        net.load_state_dict(chkpt)
        net.eval()

        return net
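
The snippet hinges on the map_location closure: torch.load calls it once per storage in the checkpoint, so tensors end up on the desired device no matter where the file was saved (see the linked PyTorch issue). A minimal, self-contained sketch of that pattern, using a throwaway nn.Linear model and a made-up file name rather than anything from the original code:

import torch
import torch.nn as nn

# Save a tiny state_dict so the reload below has something to read.
net = nn.Linear(4, 2)
torch.save(net.state_dict(), "tiny_checkpoint.pth")

cuda = torch.cuda.is_available()

def map_location(storage, _):
    # torch.load invokes this for every storage; move it to the chosen device.
    return storage.cuda() if cuda else storage.cpu()

# https://github.com/pytorch/pytorch/issues/7178
state = torch.load("tiny_checkpoint.pth", map_location=map_location)
net.load_state_dict(state)
net.eval()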
Example #3
def main(args):
    model = load_config(args.model)
    dataset = load_config(args.dataset)

    cuda = model["common"]["cuda"]

    device = torch.device("cuda" if cuda else "cpu")

    def map_location(storage, _):
        return storage.cuda() if cuda else storage.cpu()

    if cuda and not torch.cuda.is_available():
        sys.exit("Error: CUDA requested but not available")

    num_classes = len(dataset["common"]["classes"])

    # https://github.com/pytorch/pytorch/issues/7178
    chkpt = torch.load(args.checkpoint, map_location=map_location)

    net = UNet(num_classes).to(device)
    net = nn.DataParallel(net)

    if cuda:
        torch.backends.cudnn.benchmark = True

    net.load_state_dict(chkpt["state_dict"])
    net.eval()

    mean, std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]

    transform = Compose([
        ConvertImageMode(mode="RGB"),
        ImageToTensor(),
        Normalize(mean=mean, std=std)
    ])

    directory = BufferedSlippyMapDirectory(args.tiles,
                                           transform=transform,
                                           size=args.tile_size,
                                           overlap=args.overlap)
    loader = DataLoader(directory,
                        batch_size=args.batch_size,
                        num_workers=args.workers)

    # don't track tensors with autograd during prediction
    with torch.no_grad():
        for images, tiles in tqdm(loader,
                                  desc="Eval",
                                  unit="batch",
                                  ascii=True):
            images = images.to(device)
            outputs = net(images)

            # manually compute segmentation mask class probabilities per pixel
            probs = nn.functional.softmax(outputs, dim=1).data.cpu().numpy()

            for tile, prob in zip(tiles, probs):
                x, y, z = list(map(int, tile))

                # we predicted on buffered tiles; now get back probs for original image
                prob = directory.unbuffer(prob)

                # Quantize the floating point probabilities in [0,1] to [0,255] and store
                # a single-channel `.png` file with a continuous color palette attached.

                assert prob.shape[0] == 2, "single channel requires binary model"
                assert np.allclose(np.sum(prob, axis=0), 1.0), \
                    "single channel requires probabilities to sum up to one"
                foreground = prob[1:, :, :]

                anchors = np.linspace(0, 1, 256)
                quantized = np.digitize(foreground, anchors).astype(np.uint8)

                palette = continuous_palette_for_color("pink", 256)

                out = Image.fromarray(quantized.squeeze(), mode="P")
                out.putpalette(palette)

                os.makedirs(os.path.join(args.probs, str(z), str(x)),
                            exist_ok=True)
                path = os.path.join(args.probs, str(z), str(x),
                                    str(y) + ".png")

                out.save(path, optimize=True)
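
The per-tile output step at the end of the loop can be exercised on its own; a second, lightly modified variant of the same function follows further below. The sketch here uses a random two-class probability map and a plain grayscale ramp standing in for continuous_palette_for_color("pink", 256); the array, the palette, and the output file name are all made up for illustration:

import numpy as np
from PIL import Image

# Fake (classes, height, width) probability map whose channels sum to one per pixel.
foreground = np.random.rand(1, 256, 256).astype(np.float32)
prob = np.concatenate([1.0 - foreground, foreground], axis=0)

# Quantize the [0, 1] foreground probabilities into 256 bins, as in the loop above.
anchors = np.linspace(0, 1, 256)
quantized = np.digitize(prob[1:, :, :], anchors).astype(np.uint8)

# A grayscale ramp stands in for the project's continuous "pink" palette.
palette = [channel for value in range(256) for channel in (value, value, value)]

out = Image.fromarray(quantized.squeeze(), mode="P")
out.putpalette(palette)
out.save("probs_demo.png", optimize=True)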
def main(args):
    model = load_config(args.model)
    dataset = load_config(args.dataset)

    cuda = model['common']['cuda']

    device = torch.device('cuda' if cuda else 'cpu')

    def map_location(storage, _):
        return storage.cuda() if cuda else storage.cpu()

    if cuda and not torch.cuda.is_available():
        sys.exit('Error: CUDA requested but not available')

    num_classes = len(dataset['common']['classes'])

    # https://github.com/pytorch/pytorch/issues/7178
    chkpt = torch.load(args.checkpoint, map_location=map_location)

    net = UNet(num_classes).to(device)
    net = nn.DataParallel(net)

    if cuda:
        torch.backends.cudnn.benchmark = True

    net.load_state_dict(chkpt)
    net.eval()

    transform = Compose([
        ConvertImageMode(mode='RGB'),
        ImageToTensor(),
        Normalize(mean=dataset['stats']['mean'], std=dataset['stats']['std'])
    ])

    directory = BufferedSlippyMapDirectory(args.tiles, transform=transform, size=args.tile_size, overlap=args.overlap)
    loader = DataLoader(directory, batch_size=args.batch_size)

    # don't track tensors with autograd during prediction
    with torch.no_grad():
        for images, tiles in tqdm(loader, desc='Eval', unit='batch', ascii=True):
            images = images.to(device)
            outputs = net(images)

            # manually compute segmentation mask class probabilities per pixel
            probs = nn.functional.softmax(outputs, dim=1).data.cpu().numpy()

            for tile, prob in zip(tiles, probs):
                x, y, z = list(map(int, tile))

                # we predicted on buffered tiles; now get back probs for original image
                prob = directory.unbuffer(prob)

                # Quantize the floating point probabilities in [0,1] to [0,255] and store
                # a single-channel `.png` file with a continuous color palette attached.

                assert prob.shape[0] == 2, 'single channel requires binary model'
                assert np.allclose(np.sum(prob, axis=0), 1.), 'single channel requires probabilities to sum up to one'
                foreground = prob[1:, :, :]

                anchors = np.linspace(0, 1, 256)
                quantized = np.digitize(foreground, anchors).astype(np.uint8)

                palette = continuous_palette_for_color('pink', 256)

                out = Image.fromarray(quantized.squeeze(), mode='P')
                out.putpalette(palette)

                os.makedirs(os.path.join(args.probs, str(z), str(x)), exist_ok=True)
                path = os.path.join(args.probs, str(z), str(x), str(y) + '.png')

                out.save(path, optimize=True)
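
The two variants above differ mainly in the checkpoint layout (chkpt versus chkpt["state_dict"]) and in where the normalization statistics come from: the first hardcodes the ImageNet mean and std, the second reads them from dataset['stats']. ConvertImageMode, ImageToTensor, and Normalize are project-specific transforms; the rough torchvision stand-in below (with the ImageNet values as placeholders) shows what the preprocessing pipeline amounts to:

from PIL import Image
import torchvision.transforms as T

mean, std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]  # ImageNet statistics

transform = T.Compose([
    T.Lambda(lambda image: image.convert("RGB")),  # roughly ConvertImageMode(mode="RGB")
    T.ToTensor(),                                  # roughly ImageToTensor()
    T.Normalize(mean=mean, std=std),
])

tensor = transform(Image.new("RGB", (512, 512)))   # dummy tile-sized input
print(tensor.shape)                                # torch.Size([3, 512, 512])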
Example #5
def main(args):
    config = load_config(args.config)
    num_classes = len(config["classes"]["titles"])

    if torch.cuda.is_available():
        device = torch.device("cuda")
        torch.backends.cudnn.benchmark = True
    else:
        device = torch.device("cpu")

    def map_location(storage, _):
        return storage.cuda() if torch.cuda.is_available() else storage.cpu()

    # https://github.com/pytorch/pytorch/issues/7178
    chkpt = torch.load(args.checkpoint, map_location=map_location)

    net = UNet(num_classes).to(device)
    net = nn.DataParallel(net)

    net.load_state_dict(chkpt["state_dict"])
    net.eval()

    mean, std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]

    transform = Compose([ImageToTensor(), Normalize(mean=mean, std=std)])

    directory = BufferedSlippyMapDirectory(args.tiles,
                                           transform=transform,
                                           size=args.tile_size,
                                           overlap=args.overlap)
    loader = DataLoader(directory,
                        batch_size=args.batch_size,
                        num_workers=args.workers)

    if args.masks_output:
        palette = make_palette(config["classes"]["colors"][0],
                               config["classes"]["colors"][1])
    else:
        palette = continuous_palette_for_color("pink", 256)

    # don't track tensors with autograd during prediction
    with torch.no_grad():
        for images, tiles in tqdm(loader,
                                  desc="Eval",
                                  unit="batch",
                                  ascii=True):
            images = images.to(device)
            outputs = net(images)

            # manually compute segmentation mask class probabilities per pixel
            probs = nn.functional.softmax(outputs, dim=1).data.cpu().numpy()

            for tile, prob in zip(tiles, probs):
                x, y, z = list(map(int, tile))

                # we predicted on buffered tiles; now get back probs for original image
                prob = directory.unbuffer(prob)

                assert prob.shape[0] == 2, "single channel requires binary model"
                assert np.allclose(np.sum(prob, axis=0), 1.0), \
                    "single channel requires probabilities to sum up to one"

                if args.masks_output:
                    image = np.around(prob[1:, :, :]).astype(np.uint8).squeeze()
                else:
                    image = (prob[1:, :, :] * 255).astype(np.uint8).squeeze()

                out = Image.fromarray(image, mode="P")
                out.putpalette(palette)

                os.makedirs(os.path.join(args.probs, str(z), str(x)),
                            exist_ok=True)
                path = os.path.join(args.probs, str(z), str(x),
                                    str(y) + ".png")

                out.save(path, optimize=True)

    if args.web_ui:
        template = "leaflet.html" if not args.web_ui_template else args.web_ui_template
        tiles = [tile for tile, _ in tiles_from_slippy_map(args.tiles)]
        web_ui(args.probs, args.web_ui, tiles, tiles, "png", template)
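
Example #5 adds a second output mode: with args.masks_output the foreground probability is rounded to a hard 0/1 mask and colored via make_palette, otherwise it is scaled to 0..255 as before. A standalone sketch of the two conversions on a made-up 2x2 probability patch:

import numpy as np

foreground = np.array([[[0.1, 0.4], [0.6, 0.9]]], dtype=np.float32)  # shape (1, 2, 2)

mask = np.around(foreground).astype(np.uint8).squeeze()   # hard labels: 0, 0, 1, 1
probs = (foreground * 255).astype(np.uint8).squeeze()     # quantized: 25, 102, 153, 229

print(mask)
print(probs)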