"""
UNSTACK_TIFF.py
"""
import os
import shutil

from src.image_processing import unstack_images
from src.param_parser import parse_params


def unstack_tiff(images_dir, stacks_dir, clear_existing=False, compress=False):
    """Split the tiff stacks in ``stacks_dir`` into single images in ``images_dir``.

    When ``clear_existing`` is True, any previous contents of ``images_dir``
    are removed before unstacking. Returns the result of ``unstack_images``.
    """
    if clear_existing and os.path.exists(images_dir):
        shutil.rmtree(images_dir)
    # Create the output directory if it is not already present.
    os.makedirs(images_dir, exist_ok=True)

    return unstack_images(images_dir, stacks_dir, compress=compress)


if __name__ == '__main__':
    params = parse_params("Run to unstack tiff images.")

    # Hard-coded project paths for the nuclear dataset.
    raw_stacks = '../projects/nuclear/resources/images/raw-stacks'
    raw_images = '../projects/nuclear/resources/images/raw'
    unstack_tiff(raw_images, raw_stacks)
# --- Example #2 ---
# TODO: handle these types of errors


def preprocess_zooniverse_csv(output_dir, input_path, workflow):
    """Convert the raw Zooniverse export csv and print conversion statistics.

    Delegates the actual splitting to ``ZooniverseCSVParser.convert`` and then
    reports processed-row and failure counters collected by the parser.
    """
    parser = ZooniverseCSVParser(output_dir=output_dir, workflow=workflow)
    parser.convert(zooniverse_csv_path=input_path)

    print('Finished converting Zooniverse csv...')
    print(
        f'Total processed rows in workflow (including failures): {parser.processed}'
    )
    print(
        f'Missing reference image failures: {parser.errors["missing_ref_image"]}'
    )
    print(
        f'Invalid (annotation) format failures: {parser.errors["invalid_format"]}'
    )


if __name__ == '__main__':
    params = parse_params(
        "Run step 010 to break the zooniverse csv apart in to smaller files.")

    # All paths in the parameter file are relative to the project root.
    input_csv = os.path.join('..', params['zooniverse_csv_file'])
    output_dir = os.path.join('..', params['processed_csv_dir'])
    workflow = params['zooniverse_workflow']

    preprocess_zooniverse_csv(output_dir, input_csv, workflow)
# --- Example #3 ---
from src.image_processing import unstack_images
from src.param_parser import parse_params


def unstack(rawimage_dir,
            rawimagestack_dir,
            output_extension=".tiff",
            add_prefix_z=True,
            z_index_offset=0):
    """Unstack image stacks into individual slice files and report the count.

    The output directory is created on demand; the heavy lifting is done by
    ``unstack_images``, whose return value is the number of processed stacks.
    """
    os.makedirs(rawimage_dir, exist_ok=True)

    count = unstack_images(rawimage_dir, rawimagestack_dir,
                           output_extension, add_prefix_z, z_index_offset)

    print(f'Processed image stacks: {count}')
    print('Finished processing images...')


if __name__ == '__main__':
    params = parse_params("Run step 020 to unstack the source tiffs.")

    raw_dir = os.path.join('..', params['images_raw_dir'])
    stack_dir = os.path.join('..', params['images_raw_stack_dir'])
    z_offset = params['ref_images']['z_offset']

    unstack(raw_dir, stack_dir, z_index_offset=z_offset)
             patch_size, padding, clear_existing=False):
    # NOTE(review): the `def` header of this function is truncated in this
    # chunk; judging from the call site it is do_crops(image_folder,
    # label_folder, cropped_image_folder, cropped_label_folder, patch_size,
    # padding, clear_existing=False) — confirm against the full file.
    # Optionally wipe previous crop output before regenerating it.
    if clear_existing and os.path.exists(cropped_image_folder):
        shutil.rmtree(cropped_image_folder)
    if clear_existing and os.path.exists(cropped_label_folder):
        shutil.rmtree(cropped_label_folder)

    # Ensure both output directories exist.
    if not os.path.exists(cropped_image_folder):
        os.makedirs(cropped_image_folder)
    if not os.path.exists(cropped_label_folder):
        os.makedirs(cropped_label_folder)

    # Regions of interest are derived from the label folder; each ROI is
    # cropped from both the image and its matching label.
    rois = get_rois(label_folder)

    for roi in tqdm(rois):
        crop_roi(roi, image_folder, label_folder, cropped_image_folder, cropped_label_folder,
                 padding, patch_size)


if __name__ == '__main__':
    params = parse_params("Run step 050 to crop out unannotated areas of the images.")

    scaled_images = '../' + params['scaled_images_dir']
    scaled_labels = '../' + params['scaled_labels_dir']
    cropped_images = '../' + params['cropped_images_dir']
    cropped_labels = '../' + params['cropped_labels_dir']

    pad = params['crop_padding']
    patch_shape = params['model']['patch_shape']

    # patch_shape[1:] drops the leading (z/channel) dimension for 2D cropping.
    do_crops(scaled_images, scaled_labels, cropped_images, cropped_labels,
             patch_shape[1:], pad)
# --- Example #5 ---
                        border_width=border_width)
                else:
                    # Any aggregation method not handled above is rejected.
                    raise ValueError(
                        f'Invalid aggregation method: \'{method}\'.')

                #illustrate_draw_annotations(output_dir + "/..", annotations, width, height, border_width, True)
                #illustrate_area_annotations(output_dir + "/..", annotations, width, height)

                save_aggregations(output_filepath, aggregation, res_info)
    # NOTE(review): this is the tail of an aggregation loop whose function
    # header is truncated in this chunk; missing_ref_images is presumably
    # accumulated earlier in the loop — confirm against the full file.
    print(
        f"Aggregation failures because of a missing reference image: {missing_ref_images}"
    )


if __name__ == '__main__':
    params = parse_params(
        "Run step 030 to aggregate citizen science annotations.")

    # All parameter-file paths are relative to the project root.
    csv_dir = os.path.join('..', params['processed_csv_dir'])
    ref_images_dir = os.path.join('..', params['images_raw_dir'])
    labels_dir = os.path.join('..', params['images_raw_labels_dir'])

    ref_image_zoom = params['ref_images']['zoom_factor']
    ref_image_target_width = params['ref_images']['target_width']
    ref_image_target_height = params['ref_images']['target_height']

    aggregation_method = params['aggregation_method']
    border_width_nm = params['border_width_nm']

    aggregate(csv_dir,
              ref_images_dir,
              labels_dir,
              # NOTE(review): the remaining arguments of this aggregate() call
              # are truncated in this chunk — restore from the full file.
    # NOTE(review): body fragment of a rescaling function whose `def` header
    # is truncated in this chunk (called as rescale(...) below).
    for file in tqdm(os.listdir(raw_dir)):
        filename, ext = os.path.splitext(file)
        csv_filename = csv_dir + filename + '.csv'
        # A zero scale factor means the csv gave no usable scale; skip the file.
        scale_xy = get_xy_scale(csv_filename, targetsize_nm_xy)
        if scale_xy != 0:
            scale_save_image(raw_dir,
                             scaled_dir,
                             filename,
                             scale_xy,
                             targetsize_nm_z,
                             binary_format=binary_format)


if __name__ == '__main__':
    params = parse_params("Run step 040 to downscale the images and labels.")

    processed_csv_dir = os.path.join('..', params['processed_csv_dir'])
    images_raw_dir = os.path.join('..', params['images_raw_dir'])
    images_raw_labels_dir = os.path.join('..', params['images_raw_labels_dir'])
    scaled_images_dir = os.path.join('..', params['scaled_images_dir'])
    scaled_labels_dir = os.path.join('..', params['scaled_labels_dir'])

    # Physical target resolutions in nanometres.
    target_xy_nm = params['target_xy_nm']
    target_z_nm = params['target_z_nm']

    print('Downscaling source images')
    rescale(processed_csv_dir,
            images_raw_dir,
            scaled_images_dir,
            target_xy_nm,
            # NOTE(review): the remaining arguments of this rescale() call are
            # truncated in this chunk — restore from the full file.
# --- Example #7 ---
from src.ml.DataLoader import DataLoader
from src.ml.model import get_model
from src.ml.model_train import model_train
from src.param_parser import parse_params


def train_model(params, model_images_dir, model_labels_dir, model_save_dir,
                aggregation_method):
    """Build the data loader and model, then run the training loop.

    Prints the training/validation/holdout ROI splits before training starts;
    ``get_model`` also yields the epoch to resume from.
    """
    loader = DataLoader(params, model_images_dir, model_labels_dir,
                        model_save_dir, aggregation_method)
    print("Training data:   ", loader.training_rois)
    print("Validation data: ", loader.validation_rois)
    print("Holdout data:    ", loader.holdout_rois)
    # loader.visualise_training_batch()  # debug visualisation, kept disabled

    model, epoch = get_model(params, loader)
    model_train(model, params, loader, epoch)


if __name__ == '__main__':
    params = parse_params("Run step 080 to train the model.")

    images_dir = '../' + params['data']['images_dir']
    labels_dir = '../' + params['data']['labels_dir']
    save_dir = '../' + params['model']['save_dir']
    method = params['aggregation_method']

    train_model(params, images_dir, labels_dir, save_dir, method)
import os
from src.param_parser import parse_params
from src.zooniverse import ZooniverseCSVParser


# Flat script: produce a censored copy of the Zooniverse export csv.
params = parse_params("Create censored zooniverse csv file.", parameter_file='../../projects/nuclear/nuclear.json')

# Paths are relative to this script's location, two levels below the root.
input_csv = os.path.join('../..', params['zooniverse_csv_file'])
output_dir = os.path.join('../..', params['processed_csv_dir'], '..')
output_csv = os.path.join(output_dir, 'censored.csv')

workflow = params['zooniverse_workflow']

parser = ZooniverseCSVParser(output_dir=output_dir, workflow=workflow)
parser.censor(zooniverse_csv_path=input_csv, output_path=output_csv)
# --- Example #9 ---
    # NOTE(review): body fragment — the `def` header (discard_ref_images,
    # judging from the call site below) and the initialisation of
    # `ndiscarded` / `min_annotations` are truncated in this chunk.
    print("Discarding images with low annotations")
    for csv in tqdm(os.listdir(csv_dir)):
        data_frame = load_processed_csv(csv_dir + csv)
        filename, _ = os.path.splitext(csv)
        # Resolve image/label files of any extension matching the csv stem.
        image_filepath = get_file(ref_images_dir + filename + ".*")
        label_filepath = get_file(labels_dir + filename + ".*")

        # Slices with too few annotation rows are removed entirely.
        if len(data_frame) < min_annotations:
            if os.path.exists(image_filepath):
                os.remove(image_filepath)
            if os.path.exists(label_filepath):
                os.remove(label_filepath)
            ndiscarded += 1

    print(f'Discarded images: {ndiscarded}')


if __name__ == '__main__':
    params = parse_params(
        "Run step 060 to remove slices with very few annotations.")

    csv_dir = os.path.join('..', params['processed_csv_dir'])
    images_dir = os.path.join('..', params['cropped_images_dir'])
    labels_dir = os.path.join('..', params['cropped_labels_dir'])

    # NOTE(review): the cropped image/label dirs computed above are unused;
    # hard-coded raw paths are passed instead — confirm this is intentional.
    #check_annotations(processed_csv_dir, cropped_images_dir, cropped_labels_dir)
    #discard_ref_images(processed_csv_dir, cropped_images_dir, cropped_labels_dir)
    discard_ref_images(csv_dir,
                       "../projects/nuclear/resources/images/raw/",
                       "../projects/nuclear/resources/images/raw-labels/")
# --- Example #10 ---
            # NOTE(review): tail fragment of a stacking function whose header
            # is truncated in this chunk; the preceding branch condition and
            # the definitions of np_stack/resx_label/etc. are not visible.
            print("Stack error", stack_filename, "Type:", np_stack.dtype,
                  "Shape:", np_stack.shape)
        else:
            # Persist the assembled stack as a tiff with its resolution info.
            save_image(label_stacks_dir + stack_filename + '.tiff',
                       np_stack,
                       resx=resx_label,
                       resy=resy_label,
                       size_z=size_z,
                       res_unit=res_unit_label,
                       compress=compress)

    return image_range


if __name__ == '__main__':
    params = parse_params("Run step 070 to stack tiff images.")

    # Alternative input/output directory configurations, kept disabled.
    #scaled_images_dir = '../'+params['scaled_images_dir']
    #scaled_labels_dir = '../'+params['scaled_labels_dir']
    #scaled_image_stacks_dir = '../'+params['scaled_image_stacks_dir']
    #scaled_label_stacks_dir = '../'+params['scaled_label_stacks_dir']

    #cropped_images_dir = '../'+params['cropped_images_dir']
    #cropped_labels_dir = '../'+params['cropped_labels_dir']
    #cropped_image_stacks_dir = '../'+params['cropped_image_stacks_dir']
    #cropped_label_stacks_dir = '../'+params['cropped_label_stacks_dir']

    #create_tiff_stack_matching(scaled_images_dir, scaled_image_stacks_dir, scaled_labels_dir, scaled_label_stacks_dir)
    #create_tiff_stack_matching(cropped_images_dir, cropped_image_stacks_dir, cropped_labels_dir, cropped_label_stacks_dir)

    # NOTE(review): this entry point continues past the end of this chunk.
    images_dir = '../projects/nuclear/resources/images/raw/'