<th>Ground Truth</th>
					<th>Prediction</th>
					<th>Identified Particles</th>
					<th>Particle Details</th>
				</tr>
			</thead>
			<tbody>
				$$__ROWS__$$
			</tbody>
		</table>
	</body>
</html>
'''
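
# The $$__ROWS__$$ placeholder in the template above is presumably filled in by a
# simple string substitution once the per-image report rows have been generated,
# along these lines (illustrative only; the actual variable names are not shown
# in this section, so reportTemplate and rows below are placeholders):
#
#     html = reportTemplate.replace('$$__ROWS__$$', '\n'.join(rows))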

# Retrieve the path to the report output directory and ensure it is empty
outputDir = Configuration.path('segmentation_reports')
for file in glob.glob(os.path.join(outputDir, '*.png')) + glob.glob(
        os.path.join(outputDir, '*.html')):
    os.unlink(file)

# Retrieve the list of pre-windowed validation images for the segmentation neural network
validationData = SegmentationNetwork.getValidationData()
preWindowed = validationData.images

# Strip the window-related suffixes to determine the original (non-windowed) image filenames
stripRegex = re.compile('\\-[0-9]+\\-window[0-9]+')
origDir = Configuration.path('segmentation_data_preprocessed')
originalImages = set([
    os.path.join(origDir, stripRegex.sub('', os.path.basename(p)))
    for p in preWindowed
])
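
# For example, a (hypothetical) windowed filename such as 'sample01-2-window05.png'
# maps back to the original 'sample01.png':
#
#     stripRegex.sub('', 'sample01-2-window05.png')  ->  'sample01.png'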
#!/usr/bin/env python3
from common import Configuration
from natsort import natsorted
import glob, math, os, shutil, sys
import numpy as np

# The approximate batch size that we will group the sliced images into
BATCH_SIZE = 100

# Retrieve the list of image files
imageDir = Configuration.path('segmentation_data_sliced')
images = natsorted(glob.glob(os.path.join(imageDir, '*.png')))
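# (natsorted() keeps numeric suffixes in human order, so a hypothetical
# 'slice2.png' sorts before 'slice10.png', which plain sorted() would not do.)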

# Split the list of images into batches
numDirs = math.ceil(len(images) / BATCH_SIZE)
batches = np.array_split(images, numDirs)
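# (np.array_split() spreads any remainder across the leading batches: a
# hypothetical 250 images with BATCH_SIZE = 100 gives numDirs = 3 and batch
# sizes of 84, 83 and 83.)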

# Move each batch into a subdirectory
for index, batch in enumerate(batches):

    # Create the subdirectory for the batch
    subdir = os.path.join(imageDir, str(index))
    if not os.path.exists(subdir):
        os.makedirs(subdir)

    # Move each of the files into the subdirectory
    for file in batch.tolist():

        # Progress output
        print('Moving {} to {}...'.format(file, subdir))
        sys.stdout.flush()

        # Move the file into the batch subdirectory
        shutil.move(file, subdir)
#!/usr/bin/env python3
from common import Configuration, Logger, TimeLogger, ProgressLogger, SegmentationNetwork
from natsort import natsorted
import glob, os

# Retrieve the list of input files
inputDir = Configuration.path('segmentation_data_sliced')
outputDir = Configuration.path('segmentation_data_windowed')
inputFiles = natsorted(glob.glob(os.path.join(inputDir, '*.png')))

# Progress output
print('Windowing sliced data for the segmentation network ({} files)...'.format(len(inputFiles)))

# Keep track of processing progress and timing
numFiles = len(inputFiles)
timer = TimeLogger(numFiles, 'file')
progress = ProgressLogger(numFiles)

# Process each input file
for filenum, infile in enumerate(inputFiles):
	
	# Progress output
	progress.progress(filenum, 'Windowing input file "{}"...'.format(infile))
	
	# Slice the file
	SegmentationNetwork.windowToDir(infile, outputDir, warnOnOverwrite=True)

# Progress output
timer.end()
Logger.success('windowing complete ({}).'.format(timer.report()))
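
# SegmentationNetwork.windowToDir() is project code whose implementation is not
# shown here. As a rough sketch only (the window size, stride, handling of image
# edges and output naming scheme below are all assumptions), a fixed-size
# sliding-window crop could look something like this:
def _exampleWindowToDir(infile, outputDir, size=512, stride=512, warnOnOverwrite=False):
	import cv2, os
	image = cv2.imread(infile, cv2.IMREAD_UNCHANGED)
	stem = os.path.splitext(os.path.basename(infile))[0]
	window = 0
	for y in range(0, image.shape[0] - size + 1, stride):
		for x in range(0, image.shape[1] - size + 1, stride):
			outfile = os.path.join(outputDir, '{}-window{}.png'.format(stem, window))
			if warnOnOverwrite and os.path.exists(outfile):
				print('Warning: overwriting existing file "{}"'.format(outfile))
			cv2.imwrite(outfile, image[y:y+size, x:x+size])
			window += 1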
#!/usr/bin/env python3
from common import Configuration, Logger, TimeLogger, ProgressLogger, SegmentationNetwork
import cv2, glob, mergetiff, os, subprocess, tempfile, time
from natsort import natsorted
import numpy as np

# The layer numbers for the RGB channel data and the binary mask
# (The defaults specified here are correct for the TIFF files Emma prepared in Photoshop)
CHANNELS_LAYER = 1
MASK_LAYER = 0
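
# For illustration only (this script actually uses mergetiff to read the layered
# TIFFs): if each layer were stored as a separate page of the TIFF, the constants
# above could be used to pull out the RGB data and the mask as shown below. The
# one-page-per-layer assumption is a guess and may not match the real files.
def _examplePageFromTiff(path, page):
    from PIL import Image
    with Image.open(path) as tiff:
        tiff.seek(page)        # jump to the requested page
        return np.array(tiff)  # convert the current page to a NumPy array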

# Retrieve the list of input files
inputDir = Configuration.path('segmentation_data_raw')
outputDir = Configuration.path('segmentation_data_preprocessed')
inputFiles = natsorted(glob.glob(os.path.join(inputDir, '**', '*.tif')))

# Progress output
print(
    'Preprocessing raw data for the segmentation network ({} files)...'.format(
        len(inputFiles)))

# Keep track of processing progress and timing
numFiles = len(inputFiles)
timer = TimeLogger(numFiles, 'file')
progress = ProgressLogger(numFiles)

# Process each input file
for filenum, infile in enumerate(inputFiles):

    # Progress output
    progress.progress(filenum,
                      'Preprocessing input file "{}"...'.format(infile))
				<tr>
					<th>Input Image</th>
					<th>Ground Truth</th>
					<th>Prediction</th>
				</tr>
			</thead>
			<tbody>
				$$__ROWS__$$
			</tbody>
		</table>
	</body>
</html>
'''

# Retrieve the path to the report output directory and ensure it is empty
outputDir = Configuration.path('classification_reports')
for file in glob.glob(os.path.join(outputDir, '*.png')) + glob.glob(
        os.path.join(outputDir, '*.html')):
    os.unlink(file)

# Retrieve the list of validation images for the morphotype classification neural network
validationData = ClassificationNetwork.getValidationData()
validationImages = sorted([
    os.path.join(validationData.directory, p) for p in validationData.filenames
])

# Load the network from the last saved checkpoint and compute our overall validation accuracy
model, metadata = ClassificationNetwork.load()
accuracy = ValidationUtils.computeValidationAccuracy(model, validationData)
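
# ValidationUtils.computeValidationAccuracy() is project code that is not shown
# here. Assuming validationData behaves like a non-shuffled Keras directory
# iterator (its .directory and .filenames attributes suggest as much), the
# computation is presumably equivalent to something along these lines:
def _approxValidationAccuracy(model, data):
    import numpy as np
    predicted = np.argmax(model.predict(data), axis=1)  # most likely class per image
    return float(np.mean(predicted == data.classes))    # fraction predicted correctly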

# Compute our overall validation accuracy