# NOTE(review): this chunk is the interior of a per-file loop whose header is
# not visible here — mask, channelsFile and outfile are presumably loop
# variables; confirm against the full file. The first line below has lost its
# original indentation in this copy.
ret, mask = cv2.threshold(mask, 1, 255, cv2.THRESH_BINARY)

        # Denoise the mask
        mask = SegmentationNetwork.denoise(mask)

        # Invert the mask, since Emma painted the particles black rather than white in the mask layer,
        # whereas the neural network uses classification label 0 for "not particle" and 1 for "particle"
        mask = 255 - mask

        # Open the file containing the RGB channel data
        channels = mergetiff.rasterFromFile(channelsFile)
        # Keep only the first three channels if extras (e.g. an alpha plane) are present
        if channels.shape[2] > 3:
            channels = channels[:, :, 0:3]
        elif channels.shape[2] < 3:
            # Fewer than three channels: log the failure
            # NOTE(review): processing continues with the bad data afterwards — confirm intended
            Logger.error('could not extract RGB channel data!')

        # Merge the RGB channels with the modified mask as a fourth (alpha) plane
        shape = (channels.shape[0], channels.shape[1], 4)
        merged = np.zeros(shape, dtype=channels.dtype)
        merged[:, :, 0:3] = channels
        merged[:, :, 3] = mask

        # Write the output file (convert RGB to BGR for OpenCV)
        # NOTE(review): "== True" is redundant; a bare truth test would suffice
        if os.path.exists(outfile) == True:
            Logger.warning('overwriting existing file {}'.format(outfile))
        cv2.imwrite(outfile, merged[..., [2, 1, 0, 3]])

# Progress output: stop the timer and report the total preprocessing time
timer.end()
elapsed = timer.report()
Logger.success('preprocessing complete ({}).'.format(elapsed))
    # Derive the per-sample output image paths from the common basename.
    # NOTE(review): imageBase, imageChannels, channels, groundTruth, prediction,
    # sliced and particleDetails come from earlier in the enclosing loop (not
    # visible in this chunk) — confirm against the full file.
    imageGroundTruth = '{}.groundtruth.png'.format(imageBase)
    imagePrediction = '{}.prediction.png'.format(imageBase)
    imageSliced = '{}.sliced.png'.format(imageBase)
    # Write the four visualisation images; the prediction is scaled by 255,
    # presumably mapping {0,1} class labels to {0,255} greyscale — TODO confirm
    cv2.imwrite(imageChannels, channels)
    cv2.imwrite(imageGroundTruth, groundTruth)
    cv2.imwrite(imagePrediction, prediction * 255)
    cv2.imwrite(imageSliced, sliced)

    # Generate the table row for our HTML report: the four images side by side,
    # followed by a list of per-particle measurements (length x width, ratio)
    tableRows.append(
        '<tr><td><img src="{}"></td><td><img src="{}"></td><td><img src="{}"></td><td><img src="{}"></td><td>{}</td></tr>'
        .format(
            os.path.basename(imageChannels),
            os.path.basename(imageGroundTruth),
            os.path.basename(imagePrediction), os.path.basename(imageSliced),
            ''.join([
                '<li><strong>Size:</strong> {:.0f} x {:.0f}, <strong>Ratio:</strong> {:.2f}</li>'
                .format(particle['length'], particle['width'],
                        particle['ratio']) for particle in particleDetails
            ])))

# Save the HTML report: substitute the overall accuracy figure and the
# generated table rows into the template, then write the result to disk
accuracyText = '{:.2f}%'.format(accuracy * 100.0)
rowsText = '\n'.join(tableRows)
html = REPORT_TEMPLATE.replace('$$__ACCURACY__$$', accuracyText)
html = html.replace('$$__ROWS__$$', rowsText)
reportPath = os.path.join(outputDir, '_report.html')
Utility.writeFile(reportPath, html)

# Progress output: stop the timer and report the total generation time
timer.end()
Logger.success('report generation complete ({}).'.format(timer.report()))
# --- Example 3 (scraped-example boundary; original marker text: "示例#3") ---
# Gather the preprocessed PNG files that need to be sliced, in natural order
inputDir = Configuration.path('segmentation_data_preprocessed')
outputDir = Configuration.path('segmentation_data_sliced')
inputFiles = natsorted(glob.glob(os.path.join(inputDir, '*.png')))
numFiles = len(inputFiles)

# Progress output
print('Slicing preprocessed data for the segmentation network ({} files)...'.
      format(numFiles))

# Track overall timing and per-file progress
timer = TimeLogger(numFiles, 'file')
progress = ProgressLogger(numFiles)

# Slice every input file into the output directory, including its mask layer
for index, currentFile in enumerate(inputFiles):
    progress.progress(index, 'Slicing input file "{}"...'.format(currentFile))
    SegmentationNetwork.sliceToDir(currentFile, outputDir, includeMask=True, warnOnOverwrite=True)

# Progress output: stop the timer and report the total slicing time
timer.end()
Logger.success('slicing complete ({}).'.format(timer.report()))