Example #1
def test(input, output):
    # loading the testing data
    dataset = loader.load('test', base_dir=input)
    dataset.set_transform(data_transform)

    # load the network
    net = torch.load('networks/tiramisu.torchnet')

    for idx in range(0, len(dataset)):
        image, mask = dataset[idx]  # mask will be None
        image = Variable(image.unsqueeze(0).cuda())
        net_out = net(image).data.cpu()
        output_arr = net_out.numpy()[0]

        # the output is a 3 x SIZE x SIZE array that gives confidence values for each class in the three channels
        # we need to change these into a segmentation map
        segmap = np.empty((SIZE, SIZE))
        for row in range(0, SIZE):
            for col in range(0, SIZE):
                vals = output_arr[:, row, col]
                index = np.argmax(vals)
                segmap[row, col] = index

        original_size = dataset.get_original_size(idx)
        orig_size_transform = preprocess.ResizeMask(original_size[0], original_size[1])
        segmap = orig_size_transform(segmap)

        hash = dataset.get_hash(idx)
        postprocess.export_as_png(segmap, output, hash)

    tar_path = postprocess.make_tar(output)

    print("Done!")
    print("Results written to %s" % tar_path)
Example #2
def tune():
    # load the training set
    dataset = loader.load('train')
    num_samples = len(dataset)

    # compute variance along the time dimension
    variance_transform = preprocess.Variance()
    transform = Compose([variance_transform])
    dataset.set_transform(transform)

    # compute the optimal threshold for each sample
    optimal_thresholds = np.zeros(num_samples)
    optimal_filter_sizes = np.zeros(num_samples)
    optimal_scores = np.zeros(num_samples)
    for idx in range(0, num_samples):
        sample_image = dataset[idx]

        # for each image, find parameters which maximize intersection-over-union score
        optimal_threshold = 0.8
        optimal_filter_size = 1
        optimal_score = 0.0
        for threshold in np.arange(1.0, 15.1, 0.1):
            for filter_size in np.arange(1, 5, 1):
                image = sample_image.median_filter(size=filter_size).toarray()
                thresholding = image > threshold  # indices where the variance is greater than the threshold
                mask = np.zeros(image.shape)
                mask[thresholding] = 2  # the value 2 indicates cilia
                score = dataset.compute_score(idx, mask)
                if score > optimal_score:
                    optimal_score = score
                    optimal_threshold = threshold
                    optimal_filter_size = filter_size

        # record the optimal parameters and score for this movie
        optimal_thresholds[idx] = optimal_threshold
        optimal_filter_sizes[idx] = optimal_filter_size
        optimal_scores[idx] = optimal_score

    # report the mean and variance of the optimal parameters that were found
    print("Average optimal threshold: %0.4f" % np.mean(optimal_thresholds))
    print("Variance: %0.4f" % np.var(optimal_thresholds))

    print("Average optimal filter size: %0.4f" % np.mean(optimal_filter_sizes))
    print("Variance: %0.4f" % np.var(optimal_filter_sizes))

    print("Average score: %0.4f" % np.mean(optimal_scores))
    print("Variance: %0.4f" % np.var(optimal_scores))
Example #3
def main():
    dataset = loader.load(samples='all')
    print("samples loaded")
    max_row = 0
    max_cols = 0
    for i in range(len(dataset)):
        tmp_row = dataset[i].shape[1]
        tmp_cols = dataset[i].shape[2]
        print("sample {}: {} rows x {} cols".format(i, tmp_row, tmp_cols))
        if tmp_row > max_row:
            max_row = tmp_row
            print("updating rows to: {}".format(max_row))
        if tmp_cols > max_cols:
            max_cols = tmp_cols
            print("updating cols to: {}".format(max_cols))
    print("Final max rows: {}".format(max_row))
    print("Final max cols: {}".format(max_cols))
Example #4
def main():
    # these samples will be automatically downloaded if they are not found locally
    dataset = loader.load(samples=[
        '4bad52d5ef5f68e87523ba40aa870494a63c318da7ec7609e486e62f7f7a25e8',
        'a7e37600a431fa6d6023514df87cfc8bb5ec028fb6346a10c2ececc563cc5423',
        '70a6300a00dbac92be9238252ee2a75c86faf4729f3ef267688ab859eed1cc60'
    ])

    # this transform will cause the dataset to find the mean image of a movie
    # anytime an item is requested from the dataset
    transform = preprocess.Mean()
    dataset.set_transform(transform)

    mean_images = list()
    for i in range(len(dataset)):
        sample = dataset[i]  # mean transform has already been applied!
        mean_images.append(sample)

    tile(mean_images)
    plt.show()
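
A minimal sketch of what a mean-image transform along the lines of preprocess.Mean could look like, assuming each raw sample is a (frames, rows, cols) array (the real implementation may differ):

import numpy as np

class Mean:
    def __call__(self, movie):
        # collapse the time axis so each movie is reduced to its average frame
        return np.mean(movie, axis=0)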
Example #5
def main():
    # these samples will be automatically downloaded if they are not found locally
    dataset = loader.load(samples=['4bad52d5ef5f68e87523ba40aa870494a63c318da7ec7609e486e62f7f7a25e8',
                                   'a7e37600a431fa6d6023514df87cfc8bb5ec028fb6346a10c2ececc563cc5423',
                                   '70a6300a00dbac92be9238252ee2a75c86faf4729f3ef267688ab859eed1cc60'])
    resize_transform = preprocess.Resize(dataset, 640, 480)
    fft_transform = fft_features.Frequency(n=128)
    cuda_transform = tvt.Lambda(lambda x: torch.from_numpy(x).float().cuda())
    submean_transform = tvt.Lambda(lambda x: x.sub(torch.mean(x, dim=0)))
    flat_transform = tvt.Lambda(lambda x: x.view(307200, -1))
    svd_transform = tvt.Lambda(lambda x: fft_features.PCA(x, k=10))
    reshape = tvt.Lambda(lambda x: x.view(640, 480, -1))

    transforms = Compose([
        fft_transform,
        cuda_transform,
        submean_transform,
    ])

    dataset.set_transform(transforms)

    for i in range(len(dataset)):
        sample = dataset[i]
        print(sample)
Example #6
def main(input='./data', output='./results/min_var', threshold=None, filter_size=4):
    # load the test data
    print("Loading Data...")
    dataset = loader.load('test', base_dir=input)

    # compute variance along the time axis
    transforms = []
    variance_transform = preprocess.Variance()
    transforms.append(variance_transform)

    if filter_size > 0:
        med_filter_transform = preprocess.MedianFilter(size=filter_size)
        transforms.append(med_filter_transform)

    transform = Compose(transforms)
    dataset.set_transform(transform)

    # segment each image and write it to the results directory
    print("Segmenting images...")
    for idx in range(0, len(dataset)):
        img, target = dataset[idx]
        img = img.toarray()
        hash = dataset.get_hash(idx)

        # create cilia mask based on grayscale variance thresholding
        mask = np.zeros(img.shape)
        if threshold is None:
            # if no threshold is given, use this image's mean variance as the cutoff
            img_threshold = np.mean(img)
        else:
            img_threshold = threshold

        thresholding = img >= img_threshold
        mask[thresholding] = 2

        postprocess.export_as_png(mask, output, hash)

    tar_path = postprocess.make_tar(output)

    print("Done!")
    print("Results written to %s" % tar_path)
Example #7
def main(input='./data', output='./results/fft_dom', k=10, dom_frequency=11):
    # these samples will be automatically downloaded if they are not found locally
    dataset = loader.load(samples='test')
    resize_transform = preprocess.Resize(dataset, 640, 480)
    fft_transform = fft_features.Frequency(n=128)
    cuda_transform = tvt.Lambda(lambda x: torch.from_numpy(x).float().cuda())
    submean_transform = tvt.Lambda(lambda x: x.sub(torch.mean(x, dim=0)))
    flat_transform = tvt.Lambda(lambda x: x.view(307200, -1))
    svd_transform = tvt.Lambda(lambda x: fft_features.PCA(x, k=10))
    reshape = tvt.Lambda(lambda x: x.view(640, 480, -1))
    max_freq = tvt.Lambda(lambda x: x.argmax(dim=0))

    transforms = Compose([
        fft_transform,
        cuda_transform,
        submean_transform,
    ])
    dataset.set_transform(transforms)

    for i in range(0, len(dataset)):
        img, target = dataset[i]
        img_arr = img.cpu().numpy()
        print(img_arr.shape)
        hash = dataset.get_hash(i)

        # create a cilia mask from the FFT frequency components
        mask = np.zeros(img_arr.shape[1:])

        # mark pixels where a frequency component equals 10, 11, or 12 as cilia
        mask[np.where(img_arr == 10)[1:]] = 2
        mask[np.where(img_arr == 11)[1:]] = 2
        mask[np.where(img_arr == 12)[1:]] = 2

        postprocess.export_as_png(mask, output, hash)

    tar_path = postprocess.make_tar(output)
    print("Done!")
    print("Results written to %s" % tar_path)
Example #8
def train(input, epochs=200, learning_rate=1e-4):
    dataset = loader.load('train', base_dir=input)
    dataset.set_transform(data_transform)

    # create a transform that preprocesses the target masks
    mask_transform = torchvision.transforms.Compose([preprocess.ResizeMask(SIZE, SIZE)])
    dataset.set_mask_transform(mask_transform)

    # Create a Tiramisu network from the dense package
    # in_channels is the number of channels in the input images
    # out_channels is the number of classes
    net = FCDenseNet103(in_channels=1, out_channels=3).cuda()

    # train the network
    LR = learning_rate
    N_EPOCHS = epochs  # maximum number of epochs to train
    BATCH_SIZE = 3
    torch.cuda.manual_seed(0)
    optimizer = optim.SGD(net.parameters(), lr=LR)
    criterion = nn.NLLLoss2d().cuda()

    dataset_loader = dataloader.DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=True)

    for epoch in range(1, N_EPOCHS + 1):
        for idx, data in enumerate(dataset_loader):
            X = Variable(data[0].cuda())
            target = Variable(data[1].cuda()).long()
            optimizer.zero_grad()  # zero the gradient buffers
            output = net(X)  # generate prediction
            loss = criterion(output, target)  # compute loss
            loss.backward()  # backpropagate
            optimizer.step()  # update weights

        print("Epoch %d Finished" % epoch)
        # save a checkpoint of the network after each epoch
        torch.save(net, 'networks/tiramisu.torchnet')
    print("Done training!")