ret, mask = cv2.threshold(mask, 1, 255, cv2.THRESH_BINARY)  # binarise: pixels above 1 become 255, the rest 0

        # Denoise the mask
        # (the actual algorithm lives in SegmentationNetwork.denoise, not visible here)
        mask = SegmentationNetwork.denoise(mask)

        # Invert the mask, since Emma painted the particles black rather than white in the mask layer,
        # whereas the neural network uses classification label 0 for "not particle" and 1 for "particle"
        mask = 255 - mask

        # Open the file containing the RGB channel data
        channels = mergetiff.rasterFromFile(channelsFile)

        # Keep only the first three bands; any extra bands (e.g. a pre-existing
        # alpha channel) are discarded
        if channels.shape[2] > 3:
            channels = channels[:, :, 0:3]
        elif channels.shape[2] < 3:
            # NOTE(review): execution continues after this error is logged, so the
            # channel assignment below will raise a broadcast error -- confirm
            # whether an early return/raise was intended here
            Logger.error('could not extract RGB channel data!')

        # Merge the RGB channels with the modified mask as the alpha channel,
        # producing an RGBA array of the same height/width and dtype as the input
        shape = (channels.shape[0], channels.shape[1], 4)
        merged = np.zeros(shape, dtype=channels.dtype)
        merged[:, :, 0:3] = channels
        merged[:, :, 3] = mask

        # Write the output file (convert RGB to BGR for OpenCV)
        # The index list [2, 1, 0, 3] swaps the R and B channels while leaving
        # alpha in place, since cv2.imwrite expects BGR(A) channel order
        # NOTE(review): `== True` is redundant -- plain `if os.path.exists(outfile):`
        if os.path.exists(outfile) == True:
            Logger.warning('overwriting existing file {}'.format(outfile))
        cv2.imwrite(outfile, merged[..., [2, 1, 0, 3]])

# Progress output: stop the timer and report total preprocessing time
timer.end()
summary = 'preprocessing complete ({}).'
Logger.success(summary.format(timer.report()))