def write_images_to_tensorboard(inputs, outputs, global_step,
                                step=False, best=False):
    """Log the current autoencoder fit and random generations to tensorboard.

    Depending on the flags the grids go under 'Train/*' (step=True),
    'Best/*' (best=True), or the default tags, which are also shown
    locally via imshow.
    """
    # Side-by-side grid of the inputs and their reconstructions.
    fit_grid = torchvision.utils.make_grid(
        torch.cat((inputs.cpu(), outputs.cpu())), nrow=args.batch_size)

    # Decode a fresh random latent sample to judge generation quality.
    n_samples = 4 * args.batch_size
    latent = torch.randn(n_samples, args.latent_dim).to(args.device)
    generated = args.net.decode(latent).cpu()
    gen_grid = torchvision.utils.make_grid(generated, nrow=args.batch_size)

    if step:
        args.writer.add_image('Train/fit', fit_grid, global_step)
        args.writer.add_image('Train/generated', gen_grid, global_step)
    elif best:
        args.writer.add_image('Best/fit', fit_grid, global_step)
        args.writer.add_image('Best/generated', gen_grid, global_step)
    else:
        args.writer.add_image('encoder-fit', fit_grid)
        args.writer.add_image('latent-random-sample-decoded', gen_grid)
        imshow(fit_grid)
        imshow(gen_grid)
def plot_latent_space(dataiter, images, labels):
    """Visualize the 2-D latent space of the VAE held in ``args.net``.

    Decodes a regular grid over the latent square [-3, 3]^2 and logs it,
    then encodes a large sample of test images and scatter-plots their
    latent means colored by class label (to tensorboard, and to a PNG).

    Args:
        dataiter: iterator yielding (images, labels) test batches.
        images, labels: an initial batch that seeds the collection.
    """
    # Plot mesh grid from latent space
    num_imgs = 30
    lo, hi = -3., 3.

    # Mesh ticks along each latent dimension.
    z1 = torch.linspace(lo, hi, num_imgs)
    z2 = torch.linspace(lo, hi, num_imgs)

    # All (z1, z2) pairs in row-major order. cartesian_prod replaces the
    # original nested Python loops, which built the tensor element by
    # element via a list of scalar tensors.
    z = torch.cartesian_prod(z1, z2).to(args.device)

    # Decode elements from latent space
    decoded_z = args.net.decode(z).cpu()

    grid = torchvision.utils.make_grid(decoded_z, nrow=num_imgs)
    imshow(grid)
    args.writer.add_image('latent-space-grid-decoded', grid)

    # Collect a large sample of encoded test batches.
    # BUGFIX: iterator .next() was removed in Python 3; use next().
    # Also collect into lists and concatenate once — repeated torch.cat
    # inside the loop was quadratic in the number of batches.
    num_batches = 500
    collected_images = [images.cpu()]
    collected_labels = [labels.cpu()]
    for _ in range(num_batches):
        t_images, t_labels = next(dataiter)
        collected_images.append(t_images.cpu())
        collected_labels.append(t_labels.cpu())
    images = torch.cat(collected_images, 0)
    labels = torch.cat(collected_labels, 0)

    # Encode into latent space.
    # NOTE(review): args.net.cpu() moves the model to the CPU as a side
    # effect and leaves it there — confirm callers re-move it if needed.
    encoded_images_loc, _ = args.net.cpu().encode(images)
    encoded_images_loc = encoded_images_loc.cpu().detach().numpy()

    # Scatter plot of latent space (first two latent coordinates).
    x = encoded_images_loc[:, 0]
    y = encoded_images_loc[:, 1]

    # Send to tensorboard
    fig = plt.figure(figsize=(12, 10))
    ax = fig.add_subplot(1, 1, 1)
    sct = ax.scatter(x, y, c=labels, cmap='jet')
    fig.colorbar(sct)
    args.writer.add_figure('scatter-plot-of-encoded-test-sample', fig)

    # Plot with matplotlib
    plt.figure(figsize=(12, 10))
    plt.scatter(x, y, c=labels, cmap='jet')
    plt.colorbar()
    filename = "imgs/test_into_latent_space_{}.png".format(num_batches)
    plt.savefig(filename)
    plt.show()
# Example #3
# 0
    parser = ArgumentParser(
        description='Calculates the background levels for a set of images')
    parser.add_argument('--in',
                        required=True,
                        dest='infiles',
                        nargs='+',
                        help='Images used to set thresholds')
    parser.add_argument('--out', default='bg.png', help='Output file name')
    parser.add_argument(
        "--conv_len",
        type=int,
        default=0,
        help='Distance to which pixels are included in averaging')
    parser.add_argument('--show',
                        action='store_true',
                        help='Display resulting threshold image')
    parser.add_argument('--bg_img',
                        type=int,
                        help='Limits number of images to be processed')
    parser.add_argument('--clear_hotpix',
                        action='store_true',
                        help='If convolved, raise hot pixel thresholds to 256')

    args = parser.parse_args()

    bg = find_bg(args.infiles, args.out, args.conv_len, args.bg_img,
                 args.clear_hotpix)
    if args.show:
        import imshow
        imshow.imshow(args.out)
        key = roi.toSlice()
        self.outputs["Output"].setDirty(key)


from lazyflow import operators

# Create the processing graph that will host the operators.
g = Graph()
# Construct an image reader operator and point it at the input file.
reader = operators.OpImageReader(graph=g)
reader.inputs["Filename"].setValue("ostrich.jpg")
# Create the shifter operator with the Graph object as argument.
shifter = OpArrayShifter1(graph=g)

# Connect the shifter input to the image reader output.
# Because the operator has only one input slot in this example,
# the "setupOutputs" method is executed.
shifter.inputs["Input"].connect(reader.outputs["Image"])

# shifter.outputs["Output"][:] returns a "GetItemWriterObject" object.
# Its "allocate" method will be executed; this method calls "writeInto",
# which calls the "fireRequest" method of (in this case) the "OutputSlot"
# object, which calls another method in "OutputSlot" and finally the
# "execute" method of our operator.
# The wait() call blocks other activities until the results of the
# requested slot are calculated and stored in the result area.
result = shifter.outputs["Output"][:].allocate().wait()

# Display the result.
imshow(result)
def main():
    """Train and evaluate a segmentation network on noisy MNIST.

    Expects five positional CLI arguments:
        train_percentage train_batch test_batch dist_mean dist_sd
    """
    if len(sys.argv) > 5:
        # Get console parameters
        train_percentage = float(sys.argv[1])
        train_batch = int(sys.argv[2])
        test_batch = int(sys.argv[3])
        dist_mean = float(sys.argv[4])
        dist_sd = float(sys.argv[5])
    else:
        print("Not enough parameters")
        return

    # Printing parameters
    torch.set_printoptions(precision=10)
    torch.set_printoptions(edgeitems=32)

    # Select GPU when available, otherwise fall back to CPU.
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print("Device : ", device)

    # Load dataset
    full_dataset = datasets.MNISTSegmentationDataset(dist_mean, dist_sd)

    # Divide into Train and Test
    train_size = int(train_percentage * len(full_dataset))
    test_size = len(full_dataset) - train_size
    train_dataset, test_dataset = torch.utils.data.random_split(
        full_dataset, [train_size, test_size])

    # Dataset information
    print("train_dataset : ", len(train_dataset))
    print("test_dataset : ", len(test_dataset))

    # Create dataset loaders
    trainloader = torch.utils.data.DataLoader(train_dataset,
                                              batch_size=train_batch,
                                              shuffle=True,
                                              num_workers=0)

    testloader = torch.utils.data.DataLoader(test_dataset,
                                             batch_size=test_batch,
                                             shuffle=True,
                                             num_workers=0)

    # Show a sample of training images.
    # BUGFIX: iterator .next() was removed in Python 3; use next().
    dataiter = iter(trainloader)
    images, labels = next(dataiter)

    imshow(
        torchvision.utils.make_grid(torch.cat((images, labels)),
                                    nrow=train_batch))

    # Create network
    net = Net()
    net.to(device)
    print(net)

    # Define loss function and optimizer
    criterion = nn.L1Loss()
    optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)

    # Train network
    for epoch in range(2):  # loop over the dataset multiple times

        running_loss = 0.0
        for i, data in enumerate(trainloader, 0):
            # get the inputs; data is a list of [inputs, labels]
            inputs, labels = data

            # Move the batch to the selected device (no-op on CPU).
            inputs = inputs.to(device)
            labels = labels.to(device)

            # zero the parameter gradients
            optimizer.zero_grad()

            # forward + backward + optimize
            outputs = net(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            # print statistics
            running_loss += loss.item()
            if i % 2000 == 1999:  # print every 2000 mini-batches
                print('[%d, %5d] loss: %.3f' %
                      (epoch + 1, i + 1, running_loss / 2000))
                running_loss = 0.0

    print('Finished Training')

    # Test network and predict on one batch.
    dataiter = iter(testloader)
    images, labels = next(dataiter)

    # BUGFIX: the original defined images_cuda/labels_cuda only when CUDA
    # was available but used them unconditionally below, raising NameError
    # on CPU-only machines. .to(device) handles both cases.
    images_dev = images.to(device)

    # print images
    imshow(
        torchvision.utils.make_grid(torch.cat((images, labels)),
                                    nrow=test_batch))

    outputs = net(images_dev)

    # Map thresholded predictions {0, 1} to {-1, +1}.
    # NOTE(review): assumes labels are in {-1, +1} — confirm against the
    # dataset implementation.
    predicted = ((outputs > .5).type(torch.float) - .5) * 2
    predicted_cpu = predicted.cpu()
    imshow(
        torchvision.utils.make_grid(torch.cat((images, labels, predicted_cpu)),
                                    nrow=test_batch))

    # Calculate per-pixel network accuracy on the test dataset.
    correct = 0
    total = 0
    with torch.no_grad():
        for data in testloader:
            images, labels = data

            images_dev = images.to(device)
            labels_dev = labels.to(device)

            outputs = net(images_dev)

            predicted = ((outputs > .5).type(torch.float) - .5) * 2

            correct += (predicted.type(torch.long) ==
                        labels_dev.type(torch.long)).sum().item()
            # Every pixel in the batch counts toward the total.
            total += labels.numel()

    print('Accuracy of the network on the %d test images: %d %%' %
          (len(test_dataset), 100 * correct / total))
def train(trainset):
    """Train the VAE in ``args.net`` on *trainset*, logging to tensorboard.

    Splits the set per ``args.train_percentage``, optimizes the ELBO, and
    tracks the best (lowest) batch loss via ``update_best``.
    """
    # Split dataset
    train_size = int(args.train_percentage * len(trainset))
    test_size = len(trainset) - train_size
    train_dataset, test_dataset \
        = torch.utils.data.random_split(trainset, [train_size, test_size])

    # Dataset information
    print('train dataset : {} elements'.format(len(train_dataset)))

    # Create dataset loader
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=True)

    # Show sample of images
    if args.plot:
        # BUGFIX: iterator .next() was removed in Python 3; use next().
        dataiter = iter(train_loader)
        images, _ = next(dataiter)

        grid = torchvision.utils.make_grid(images)
        imshow(grid)
        args.writer.add_image('sample-train', grid)

    # Define optimizer
    if args.optimizer == 'adam':
        optimizer = optim.Adam(args.net.parameters(), lr=1e-3)
    elif args.optimizer == 'sgd':
        optimizer = optim.SGD(args.net.parameters(), lr=0.01, momentum=0.9)
    elif args.optimizer == 'rmsprop':
        optimizer = optim.RMSprop(args.net.parameters(), lr=0.01)
    else:
        # BUGFIX: an unrecognized value previously left `optimizer` unbound
        # and crashed later with an unrelated NameError.
        raise ValueError('unknown optimizer: {!r}'.format(args.optimizer))

    # Loss function
    criterion = elbo_loss_function

    # Set best for minimization
    best = float('inf')

    print('Started Training')
    # loop over the dataset multiple times
    for epoch in range(args.epochs):
        # reset running loss statistics
        train_loss = mse_loss = running_loss = 0.0

        for batch_idx, data in enumerate(train_loader, 1):
            # get the inputs; data is a list of [inputs, labels]
            inputs, _ = data
            inputs = inputs.to(args.device)

            # NOTE: detect_anomaly slows training; it is kept here to help
            # pinpoint NaNs in the backward pass.
            with autograd.detect_anomaly():
                # zero the parameter gradients
                optimizer.zero_grad()

                # forward + backward + optimize
                outputs, mu, logvar = args.net(inputs)
                loss = criterion(outputs, inputs, mu, logvar)
                loss.backward()
                optimizer.step()

            # BUGFIX: accumulate plain floats. Summing the F.mse_loss
            # tensor kept the autograd graph alive (memory leak), and the
            # MSE was computed twice per batch.
            batch_mse = F.mse_loss(outputs, inputs).item()
            train_loss += loss.item()
            running_loss += loss.item()
            mse_loss += batch_mse

            # Global step
            global_step = batch_idx + len(train_loader) * epoch

            # Write tensorboard statistics
            args.writer.add_scalar('Train/loss', loss.item(), global_step)
            args.writer.add_scalar('Train/mse', batch_mse, global_step)

            # check if current batch had best fitness
            if loss.item() < best:
                best = loss.item()
                update_best(inputs, outputs, loss, global_step)

            # print every args.log_interval of batches
            if batch_idx % args.log_interval == 0:
                print("Train Epoch : {} Batches : {} "
                      "[{}/{} ({:.0f}%)]\tLoss : {:.6f}"
                      "\tError : {:.6f}"
                      .format(epoch, batch_idx,
                              args.batch_size * batch_idx,
                              len(train_loader.dataset),
                              100. * batch_idx / len(train_loader),
                              running_loss / args.log_interval,
                              mse_loss / args.log_interval))

                mse_loss = running_loss = 0.0

                # Add images to tensorboard
                write_images_to_tensorboard(inputs, outputs,
                                            global_step, step=True)

        print('====> Epoch: {} Average loss: {:.4f}'.format(
              epoch, train_loss / len(train_loader)))

    # Add trained model
    args.writer.close()
    print('Finished Training')
def main():
    """Train a U-Net on the ISBI 2012 EM segmentation dataset.

    Expects five positional CLI arguments:
        train_percentage train_batch test_batch epochs mini_batches
    """
    if len(sys.argv) > 5:
        # Get console parameters
        train_percentage = float(sys.argv[1])
        train_batch = int(sys.argv[2])
        test_batch = int(sys.argv[3])
        number_of_epochs = int(sys.argv[4])
        number_of_mini_batches = int(sys.argv[5])
    else:
        print("Not enough parameters")
        return

    # Printing parameters
    torch.set_printoptions(precision=10)
    torch.set_printoptions(edgeitems=32)

    # Select GPU when available, otherwise fall back to CPU.
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print("Device : ", device)

    # Load dataset
    full_dataset = datasets.ISBI2012DatasetTrain('./dataset/train-volume.tif',
                                                 './dataset/train-labels.tif')
    submit_dataset = datasets.ISBI2012DatasetTest('./dataset/test-volume.tif')

    # Divide into Train and Test
    train_size = int(train_percentage * len(full_dataset))
    test_size = len(full_dataset) - train_size
    train_dataset, test_dataset \
        = torch.utils.data.random_split(full_dataset, [train_size, test_size])

    # Test with the full dataset (the split above only limits training).
    test_dataset = full_dataset

    # Dataset information
    print("train_dataset : ", len(train_dataset))
    print("test_dataset : ", len(test_dataset))

    # Create dataset loaders
    trainloader = torch.utils.data.DataLoader(train_dataset,
                                              batch_size=train_batch,
                                              shuffle=True,
                                              num_workers=0)

    testloader = torch.utils.data.DataLoader(test_dataset,
                                             batch_size=test_batch,
                                             shuffle=True,
                                             num_workers=0)

    submitloader = torch.utils.data.DataLoader(submit_dataset,
                                               batch_size=train_batch,
                                               shuffle=False,
                                               num_workers=0)

    # Show a sample of training images.
    # BUGFIX: iterator .next() was removed in Python 3; use next().
    dataiter = iter(trainloader)
    images, labels = next(dataiter)

    imshow(
        torchvision.utils.make_grid(torch.cat((images, labels)),
                                    nrow=train_batch))

    print('Started Training')

    # Create network
    net = UNet()
    net.to(device)

    # Define loss function and optimizer
    loss_history = []
    criterion = nn.L1Loss()
    optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)

    # Train network - loop over the dataset multiple times
    for epoch in range(number_of_epochs):

        running_loss = 0.0
        for i, data in enumerate(trainloader, 0):
            # get the inputs; data is a list of [inputs, labels]
            inputs, labels = data

            # Move the batch to the selected device (no-op on CPU).
            inputs = inputs.to(device)
            labels = labels.to(device)

            # zero the parameter gradients
            optimizer.zero_grad()

            # forward + backward + optimize
            outputs = net(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            # print statistics
            running_loss += loss.item()

            # print every number_of_mini_batches
            if i % number_of_mini_batches == number_of_mini_batches - 1:
                print(
                    '[%d, %5d] loss: %.8f' %
                    (epoch + 1, i + 1, running_loss / number_of_mini_batches))
                loss_history.append(running_loss / number_of_mini_batches)
                running_loss = 0.0

    print('Finished Training')

    # Test network and predict on one batch.
    dataiter = iter(testloader)
    images, labels = next(dataiter)

    # BUGFIX: the original defined images_cuda only when CUDA was available
    # but used it unconditionally below (NameError on CPU-only machines).
    images_dev = images.to(device)

    # print images
    imshow(
        torchvision.utils.make_grid(torch.cat((images, labels)),
                                    nrow=test_batch))

    outputs = net(images_dev)

    # Map thresholded predictions {0, 1} to {-1, +1}.
    predicted = ((outputs > .5).type(torch.float) - .5) * 2
    predicted_cpu = predicted.cpu()
    imshow(
        torchvision.utils.make_grid(torch.cat((images, labels, predicted_cpu)),
                                    nrow=test_batch))

    # Print loss over time.
    # BUGFIX: the x-axis was range(1, number_of_epochs + 1), which raises
    # ValueError whenever more (or fewer) than one loss point is logged
    # per epoch; size the axis by the actual history length instead.
    plt.plot(range(1, len(loss_history) + 1), loss_history)
    plt.xlabel("Epochs")
    plt.ylabel("Loss")
    plt.show()

    # Calculate per-pixel network accuracy on the test dataset.
    correct = 0
    total = 0
    with torch.no_grad():
        for data in testloader:
            images, labels = data

            # BUGFIX: labels_cuda was referenced below even on the CPU
            # path; move both tensors via .to(device).
            images_dev = images.to(device)
            labels_dev = labels.to(device)

            outputs = net(images_dev)

            predicted = ((outputs > .5).type(torch.float) - .5) * 2

            correct += (predicted.type(torch.long) ==
                        labels_dev.type(torch.long)).sum().item()
            # Every pixel in the batch counts toward the total.
            total += labels.numel()

    print('Accuracy of the network on the %d test images: %d %%' %
          (len(test_dataset), 100 * correct / total))
# Example #8
# 0
#       result = req.notify(callback)
#
#    The callback function that you have to provide receives
#    as first argument the result array. (To learn more, read
#    the documentation of the GetItemRequestObject.)
#
#    We will use a mere synchronous request for now:


result = req.wait()

#   To visualize the result of our graph processing pipeline
#   we will use a small image viewer helper:


imshow(result)

#    Finally, we also want to make sure that the noOp, which
#    is in fact an OpArrayPiper, does not change the image of
#    the image reader.
#    While doing so we will introduce you to the
#    call chaining syntax you may use when retrieving results
#    of operator outputs:

result2 = reader.outputs["Image"][:].allocate().wait()


#    This combines all previous operations into
#    one line.

imshow(result2)
# Example #9
# 0
def main():
    """CIFAR-10 classifier tutorial: train, save, reload, and evaluate."""
    classes, testloader, trainloader = set_dataset()

    # Grab a random training batch.
    # BUGFIX: iterator .next() was removed in Python 3; use next().
    dataiter = iter(trainloader)
    images, labels = next(dataiter)

    # Show the images with their class names.
    # NOTE(review): range(4) assumes the loader's batch size is 4 —
    # confirm against set_dataset().
    imshow(torchvision.utils.make_grid(images))
    print(' '.join('%5s' % classes[labels[j]] for j in range(4)))

    net = Net()

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)

    for epoch in range(2):
        running_loss = 0.0
        for i, data in enumerate(trainloader, 0):
            inputs, labels = data

            optimizer.zero_grad()

            outputs = net(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            running_loss += loss.item()
            if i % 2000 == 1999:  # print every 2000 mini-batches
                print('[%d, %5d] loss: %.3f' %
                      (epoch + 1, i + 1, running_loss / 2000))
                running_loss = 0.0
    print('Finished Training')

    # Persist the trained weights.
    PATH = './cifar_net.pth'
    torch.save(net.state_dict(), PATH)

    dataiter = iter(testloader)
    images, labels = next(dataiter)

    imshow(torchvision.utils.make_grid(images))
    print('GroundTruth: ',
          ' '.join('%5s' % classes[labels[j]] for j in range(4)))

    # Reload the weights into a fresh network to demonstrate loading.
    net = Net()
    net.load_state_dict(torch.load(PATH))

    outputs = net(images)

    _, predicted = torch.max(outputs, 1)

    print('Predicted: ', ' '.join('%5s' % classes[predicted[j]]
                                  for j in range(4)))

    # Overall accuracy on the test set.
    correct = 0
    total = 0
    with torch.no_grad():
        for data in testloader:
            images, labels = data
            outputs = net(images)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()

    print('Accuracy of the network on the 10000 test images: %d %%' %
          (100 * correct / total))

    # Per-class accuracy.
    class_correct = list(0. for i in range(10))
    class_total = list(0. for i in range(10))
    with torch.no_grad():
        for data in testloader:
            images, labels = data
            outputs = net(images)
            _, predicted = torch.max(outputs, 1)
            c = (predicted == labels).squeeze()
            for i in range(4):
                label = labels[i]
                class_correct[label] += c[i].item()
                class_total[label] += 1

    for i in range(10):
        # BUGFIX: guard against division by zero for any class that never
        # appeared in the evaluated batches.
        if class_total[i]:
            print('Accuracy of %5s : %2d %%' % (
                classes[i], 100 * class_correct[i] / class_total[i]))

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    print(device)

    net.to(device)

    # NOTE(review): `data` here is the last batch left over from the loop
    # above — this line only demonstrates moving a batch to the device.
    inputs, labels = data[0].to(device), data[1].to(device)
                                         warp_matrix, (sz[1], sz[0]),
                                         flags=cv2.INTER_LINEAR +
                                         cv2.WARP_INVERSE_MAP)
            result = cv2.warpAffine(result,
                                    warp_matrix, (sz[1], sz[0]),
                                    flags=cv2.INTER_LINEAR +
                                    cv2.WARP_INVERSE_MAP)

    # Show final results
    #imshow(im2_aligned[0],name="Aligned Image 2",showresult=True )

    return (im2_aligned, result)


if __name__ == '__main__':
    print("This file provides the function for aligning two images.")

    # Demo: align two photographs of the same scene.
    # NOTE(review): hard-coded absolute Windows paths — adjust before use.
    im1 = cv2.imread("D:\\footlongmodel2\\DSC_0536.jpg")
    im2 = cv2.imread("D:\\footlongmodel2\\DSC_0570.jpg")

    # Crop a region of interest and keep only channel index 1 (green in
    # OpenCV's BGR order).
    im3 = np.uint8(im1[600:1800, 3400:4000, 1])
    im4 = np.uint8(im2[600:1800, 3400:4000, 1])

    # Alternative: use the full-frame channel instead of the crop.
    #im3 = np.uint8(im1[:,:,1])
    #im4 = np.uint8(im2[:,:,1])

    imshow(im3, name='im3', x=im3.shape[0], y=im3.shape[1])
    imshow(im4, name='im4', x=im4.shape[0], y=im4.shape[1])

    # Align the crops, warping each image in turn.
    im5, res = alignImage(im3, im4, im4)
    im6, res = alignImage(im3, im4, im3)
# Example #11
# 0
def findOilBlob(filename,
                threshold=127,
                color='gray',
                iterations=[2, 8, 6],
                showresult=False):
    """Detect the oil-blob interface contour in an image file.

    Reads the image, reduces it to one channel, denoises, thresholds
    (Otsu), cleans the mask with erode/dilate/erode passes, then keeps
    the longest contour found.

    Returns a tuple (imgray, contours, imgrayorig, contoursorig): the
    annotated grayscale image, the longest contour, an untouched
    grayscale copy, and all contours sorted by descending length.
    """
    print("Reading image : ", filename)
    source = cv2.imread(filename, cv2.IMREAD_COLOR)

    # Reduce to a single channel; any unrecognized color name falls back
    # to a standard grayscale conversion.
    if color in ('blue', 'BLUE', 'Blue', 'B', 'b'):
        imgray = source[:, :, 0]
    elif color in ('green', 'GREEN', 'Green', 'G', 'g'):
        imgray = source[:, :, 1]
    elif color in ('red', 'RED', 'Red', 'R', 'r'):
        imgray = source[:, :, 2]
    else:
        imgray = cv2.cvtColor(source, cv2.COLOR_BGR2GRAY)
    imgrayorig = imgray.copy()
    imshow(imgray, showresult, name='grayscale')

    # Edge-preserving smoothing.
    imgray = cv2.bilateralFilter(imgray, 5, 40, 40)

    # Trim a 2-pixel border from the color image (kept from the original
    # pipeline; the cropped array is not used afterwards).
    source = source[2:-2, 2:-2, :]

    # Non-local-means denoising.
    imgray = cv2.fastNlMeansDenoising(imgray, None, 10, 7, 21)

    imshow(imgray, showresult, name='denoise')

    # Binarize. NOTE(review): THRESH_OTSU chooses the threshold
    # automatically, so the `threshold` argument is effectively ignored.
    ret, thresh = cv2.threshold(imgray, threshold, 255, cv2.THRESH_OTSU)
    imshow(thresh, showresult, name='threshold')

    # Morphological cleanup with the configured iteration counts.
    kernel = np.ones((5, 5), np.uint8)
    mask = cv2.erode(thresh, kernel, iterations=iterations[0])
    mask = cv2.dilate(mask, kernel, iterations=iterations[1])
    imshow(mask, showresult, name='dilation')
    mask = cv2.erode(mask, kernel, iterations=iterations[2])
    imshow(mask, showresult, name='erosion')

    # Extract contours from the cleaned mask and draw them onto the
    # grayscale image in place.
    contours, hierarchy = cv2.findContours(mask, method=cv2.RETR_TREE,
                                           mode=cv2.CHAIN_APPROX_SIMPLE)
    cv2.drawContours(imgray, contours, contourIdx=-1, color=(255, 0, 0),
                     thickness=4)
    imshow(imgray, showresult, name='contours')

    # Real interfaces have many points; discard short spurious contours
    # by sorting on length and keeping only the longest, while preserving
    # the full sorted list as a copy.
    contours = sorted(contours, key=len, reverse=True)
    contoursorig = np.copy(contours)
    contours = contours[0]

    return (imgray, contours, imgrayorig, contoursorig)
# Example #12
# 0
        numcontourpoint = curvatureradius.shape[0]
        curvatureradiusplot = (curvatureradius[:, 2] -
                               minradius) / (maxradius - minradius) * 256
        Nx, Ny, __ = imorig.shape
        for lp1 in range(numcontourpoint):
            # coordinate index
            indy = int(oilcontour[lp1, 0])
            indx = int(oilcontour[lp1, 1])
            # color
            imorig[indx - dotsize:indx + dotsize + 1,
                   indy - dotsize:indy + dotsize + 1, [0, 1]] = 0
            imorig[indx - dotsize:indx + dotsize + 1,
                   indy - dotsize:indy + dotsize + 1,
                   2] = curvatureradiusplot[lp1]

        imshow(imorig, showresult)
        savename = targetpath + '/' + ite[1]
        cv2.imwrite(savename, imorig)
        '''
        count = count+1
        if count > 5:
            break
        '''

    print(maxradiustotal, minradiustotal)

    # plot the curvature radius of the last image
    plt.figure()
    plt.plot(curvatureradius[:, 2], 'sr-')

    # plot the color bar for the curvature radius