Example #1
0
def get_io_dirs(run_type, cfg_parser):
    '''Return the (input_dir, output_dir) pair for a training or forward run.

    Both directories are derived from the configured data_dir by appending
    the "_preprocessed" / "_training_output" suffixes.
    '''
    if run_type not in ('forward', 'training'):
        raise ValueError(
            'run_type variable should be one of "forward" or "training"',
            run_type)
    # add_pathsep appends a trailing separator; drop it before suffixing.
    base_dir = add_pathsep(cfg_parser.get('general', 'data_dir'))[:-1]
    input_dir = add_pathsep(base_dir + "_preprocessed")
    output_dir = add_pathsep(base_dir + "_training_output")
    return input_dir, output_dir
Example #2
0
def main(main_config_fpath='../data/example/main_config.cfg',
         run_type='forward'):
    cfg_parser = ConfigParser.SafeConfigParser()
    cfg_parser.readfp(open(main_config_fpath, 'r'))
    memory = cfg_parser.get('docker', 'memory')
    use_docker_machine = cfg_parser.getboolean('docker', 'use_docker_machine')
    container_name = cfg_parser.get('docker', 'container_name')
    machine_name_prefix = cfg_parser.get('docker', 'machine_name')
    machine_name = machine_name_prefix + "-" + memory.strip()
    training_output_dir = add_pathsep(
        cfg_parser.get('training', 'training_output_dir'))
    data_dir = add_pathsep(cfg_parser.get('general', 'data_dir'))[0:-1]
    forward_output_dir = add_pathsep(data_dir + "_training_output")

    dir_to_mount = os.path.dirname(os.path.dirname(
        os.path.abspath(__file__)))  # Mounts ConvnetCellDetection directory
    print dir_to_mount

    cmd = ''
    if use_docker_machine:
        cmd += start_docker_machine(memory, machine_name)
        cmd += '; '
    cmd += start_znn_container(dir_to_mount, container_name)

    if run_type == 'training':
        cmd += train_network(training_output_dir) + remove_znn_container(
            container_name, machine_name, use_docker_machine)
    elif run_type == 'forward':
        cmd += forward_pass(forward_output_dir) + remove_znn_container(
            container_name, machine_name, use_docker_machine)
    else:
        cmd += remove_znn_container(container_name, machine_name,
                                    use_docker_machine)
        raise ValueError(
            'run_type variable should be one of "forward" or "training"',
            run_type)

    print cmd
    process = subprocess.Popen(cmd, shell=True)
    process.communicate()

    if run_type == 'forward':
        rename_output_files(cfg_parser, main_config_fpath, forward_output_dir)
        cfg_parser.remove_section('fnames')
        with open(main_config_fpath, 'w') as main_config_file:
            cfg_parser.write(main_config_file)
Example #3
0
def is_training(main_config_fpath):
    '''Check if the complete pipeline should include training and scoring.

    Returns True when the configured data directory is labeled.
    '''
    with open(main_config_fpath, 'r') as config_file:
        cfg_parser = ConfigParser.SafeConfigParser()
        cfg_parser.readfp(config_file)
        data_dir = preprocess.add_pathsep(cfg_parser.get(
            'general', 'data_dir'))
        # Return the predicate directly instead of if/return True/return False.
        return preprocess.is_labeled(data_dir)
def read_preprocessed_images(directory, filenames):
    '''Read and correlate preprocessed images input to ZNN with
    filenames read by read_network_output.

    For each name in filenames, the first file in directory whose basename
    is a substring of that name is loaded as a float32 numpy array.

    Raises ValueError when no file in directory matches a given name.
    '''
    directory = add_pathsep(directory)
    # List and stat the directory ONCE instead of once per filename
    # (the original re-ran os.listdir/isfile/splitext for every name).
    # Listing order is preserved, so the same first match wins.
    candidates = [(os.path.splitext(f)[0], f)
                  for f in os.listdir(directory)
                  if os.path.isfile(directory + f)]
    images = []
    for fname in filenames:
        for base, f in candidates:
            if base in fname:
                im = Image.open(directory + f)
                images.append(np.array(im, dtype=np.float32))
                break
        else:
            # for/else: reached only when no candidate matched
            raise ValueError("Couldn't find " + fname + " in " + directory)
    return images
Example #5
0
def main(main_config_fpath='../data/example/main_config.cfg'):
    cfg_parser = ConfigParser.SafeConfigParser()
    cfg_parser.readfp(open(main_config_fpath, 'r'))

    # read parameters
    data_dir = add_pathsep(cfg_parser.get('general', 'data_dir'))
    postprocess_dir = data_dir[0:-1] + "_postprocessed" + os.sep
    img_width = cfg_parser.getint('general', 'img_width')
    img_height = cfg_parser.getint('general', 'img_height')
    # if not is_labeled(data_dir) or not is_labeled(postprocess_dir): #we haven't been putting test/train/val into a "labeled" folder #Let's discuss if we should. -AR 09/13/16

    if not is_labeled(data_dir):
        print "original data_dir was not called 'labeled'. Aborting scoring."
        return
    else:
        print "Scoring labeled data"
        if postprocess_dir[-1] != os.path.sep:
            postprocess_dir += os.path.sep
        if data_dir[-1] != os.path.sep:
            data_dir += os.path.sep
        score_labeled_data(postprocess_dir, data_dir, img_width, img_height)
Example #6
0
def create_znn_config_file(output_dir, train_indices, val_indices,
                           forward_indices, new_net_fpath, train_net_prefix,
                           train_patch_size, learning_rate, momentum,
                           num_iter_per_save, max_iter, forward_net,
                           forward_outsz, num_file_pairs):
    '''Create output_dir/znn_config.cfg from the packaged template.

    Copies default_znn_config.cfg (shipped next to this script) into
    output_dir and overwrites its [parameters] entries with the supplied
    training/forward settings.  Paths are rewritten with dockerize_path so
    they resolve inside the ZNN container.

    NOTE(review): num_file_pairs is currently unused; kept so the call
    signature stays backward-compatible.
    '''
    # copy default_znn_config.cfg from src to output_dir
    src_path = add_pathsep(os.path.dirname(os.path.abspath(__file__)))
    znn_config_path = output_dir + 'znn_config.cfg'
    shutil.copy(src_path + 'default_znn_config.cfg', znn_config_path)

    # use configParser to modify fields in the new config file;
    # 'with' closes the template handle, which was previously leaked.
    znn_cfg_parser = ConfigParser.SafeConfigParser()
    with open(znn_config_path, 'r') as template_file:
        znn_cfg_parser.readfp(template_file)

    znn_cfg_parser.set('parameters', 'fnet_spec',
                       dockerize_path(new_net_fpath))
    znn_cfg_parser.set('parameters', 'fdata_spec',
                       dockerize_path(output_dir + 'dataset.spec'))
    znn_cfg_parser.set('parameters', 'train_net_prefix',
                       dockerize_path(train_net_prefix))
    znn_cfg_parser.set('parameters', 'train_range', train_indices)
    znn_cfg_parser.set('parameters', 'test_range', val_indices)
    znn_cfg_parser.set('parameters', 'train_outsz', train_patch_size)
    znn_cfg_parser.set('parameters', 'eta', learning_rate)
    znn_cfg_parser.set('parameters', 'momentum', momentum)
    znn_cfg_parser.set('parameters', 'Num_iter_per_save', num_iter_per_save)
    znn_cfg_parser.set('parameters', 'max_iter', max_iter)
    znn_cfg_parser.set('parameters', 'forward_range',
                       forward_indices)  # autoset as everything in input_dir
    znn_cfg_parser.set('parameters', 'forward_net',
                       dockerize_path(forward_net))
    znn_cfg_parser.set(
        'parameters', 'forward_outsz', forward_outsz
    )  # TODO: calculate forward_outsz automatically, based on field of view
    znn_cfg_parser.set('parameters', 'output_prefix',
                       dockerize_path(output_dir))
    with open(znn_config_path, 'wb') as configfile:
        znn_cfg_parser.write(configfile)
Example #7
0
def main(main_config_fpath="../data/example/main_config.cfg"):
    '''Use the configuration file to find all image and
    ROI paths, then start the GUI application.'''
    cfg_parser = ConfigParser.SafeConfigParser()
    # 'with' closes the config file handle, which was previously leaked.
    with open(main_config_fpath, 'r') as config_file:
        cfg_parser.readfp(config_file)

    data_dir = add_pathsep(cfg_parser.get('general', 'data_dir'))
    preprocess_dir = data_dir[0:-1] + "_preprocessed" + os.sep
    postprocess_dir = data_dir[0:-1] + "_postprocessed" + os.sep
    ttv_list = ['training' + os.sep, 'validation' + os.sep, 'test' + os.sep]
    if not os.path.isdir(data_dir):
        sys.exit("Specified data directory " + data_dir + " does not exist.")

    # files[basename] -> [preprocessed .tif, postprocessed .npz, ROI .zip]
    files = defaultdict(lambda: [None, None, None])

    # When labeled, walk each train/validation/test subdirectory; otherwise
    # walk the directories themselves.
    for ttv in ttv_list if is_labeled(data_dir) else ['']:
        for f in os.listdir(preprocess_dir + ttv):
            basename, ext = os.path.splitext(f)
            # preprocessed image: a tif that is not itself an ROI file
            if (ext.lower() in ('.tif', '.tiff')
                    and basename[-4:].lower() != "_roi"):
                files[basename][0] = preprocess_dir + ttv + f
        for f in os.listdir(postprocess_dir + ttv):
            basename, ext = os.path.splitext(f)
            if ext == '.npz' and basename[-7:] != "_MANUAL":
                files[basename][1] = postprocess_dir + ttv + f
        for f in os.listdir(data_dir + ttv):
            basename, ext = os.path.splitext(f)
            if ext == '.zip':
                files[basename][2] = data_dir + ttv + f

    img_width = cfg_parser.getint('general', 'img_width')
    img_height = cfg_parser.getint('general', 'img_height')

    root = Tk()
    root.wm_title("ConvnetCellDetection")
    # Keep a reference to the App so its widgets/callbacks stay alive.
    app = App(root, files, img_width, img_height, is_labeled(data_dir))
    root.mainloop()
def main(main_config_fpath='../data/example/main_config.cfg'):
    '''Run postprocessing on network output and save the final ROIs.

    Reads postprocessing parameters from the main config file (or from a
    grid-search-optimized parameter file when enabled), runs postprocessing
    per split (training/validation/test when labeled, otherwise the whole
    directory), and writes one flattened .tif preview plus one compressed
    .npz of ROIs and probabilities per input file.
    '''
    cfg_parser = ConfigParser.SafeConfigParser()
    # 'with' closes the config file handle, which was previously leaked.
    with open(main_config_fpath, 'r') as config_file:
        cfg_parser.readfp(config_file)

    # get directory paths (unused downsample_dir removed)
    data_dir = add_pathsep(cfg_parser.get('general', 'data_dir'))
    parent_dir = add_pathsep(os.path.dirname(data_dir[0:-1]))
    preprocess_dir = data_dir[0:-1] + "_preprocessed" + os.sep
    network_output_dir = data_dir[0:-1] + "_training_output" + os.sep
    postprocess_dir = data_dir[0:-1] + "_postprocessed" + os.sep
    ttv_list = ['training' + os.sep, 'validation' + os.sep, 'test' + os.sep]

    # ensure directories exist
    if not os.path.isdir(data_dir):
        sys.exit("Specified data directory " + data_dir + " does not exist.")
    for ttv in ttv_list if is_labeled(data_dir) else ['']:
        if not os.path.isdir(postprocess_dir + ttv):
            os.makedirs(postprocess_dir + ttv)

    # split training output directory if necessary
    if is_labeled(data_dir):
        split_dict = get_labeled_split(data_dir)
        split_labeled_directory(split_dict, network_output_dir, False, False)

    # get non-optimized postprocessing parameters
    img_width = cfg_parser.getint('general', 'img_width')
    img_height = cfg_parser.getint('general', 'img_height')
    do_gridsearch_postprocess_params = cfg_parser.getboolean(
        'general', 'do_gridsearch_postprocess_params')
    min_size_wand = cfg_parser.getfloat('postprocessing', 'min_size_wand')
    max_size_wand = cfg_parser.getfloat('postprocessing', 'max_size_wand')

    # locate optimized postprocessing parameters or run grid search optimization
    params_cfg_parser = ConfigParser.SafeConfigParser()
    opt_params_cfg_fn = parent_dir + "optimized_postprocess_params.cfg"
    if do_gridsearch_postprocess_params and os.path.isfile(opt_params_cfg_fn):
        with open(opt_params_cfg_fn, 'r') as opt_params_file:
            params_cfg_parser.readfp(opt_params_file)
    elif (do_gridsearch_postprocess_params and
          not os.path.isfile(opt_params_cfg_fn) and is_labeled(data_dir)):
        # Optimize on the validation split (ttv_list[1]) only.
        params_cfg_parser = parameter_optimization(
            data_dir + ttv_list[1], preprocess_dir + ttv_list[1],
            network_output_dir + ttv_list[1], postprocess_dir + ttv_list[1],
            min_size_wand, max_size_wand, img_width, img_height,
            opt_params_cfg_fn, cfg_parser)
    else:
        params_cfg_parser = cfg_parser

    # read postprocessing-specific parameters
    threshold = params_cfg_parser.getfloat('postprocessing', 'probability_threshold')
    min_size_watershed = params_cfg_parser.getfloat('postprocessing', 'min_size_watershed')
    merge_size_watershed = params_cfg_parser.getfloat('postprocessing', 'merge_size_watershed')
    max_footprint_str = params_cfg_parser.get('postprocessing', 'max_footprint')
    # e.g. "(5, 5)" -> (5, 5); strip whitespace and parentheses from the ends
    max_footprint = tuple(int(float(c))
                          for c in max_footprint_str.strip(' ()').split(","))
    max_size_wand = params_cfg_parser.getfloat('postprocessing', 'max_size_wand')
    # Validate explicitly: assert is stripped when Python runs with -O.
    if len(max_footprint) != 2:
        raise ValueError("max_footprint must have exactly two components, got "
                         + max_footprint_str)

    # run postprocessing
    for ttv in ttv_list if is_labeled(data_dir) else ['']:
        final_rois, final_roi_probs, filenames = postprocessing(
            preprocess_dir + ttv, network_output_dir + ttv,
            postprocess_dir + ttv, threshold,
            min_size_watershed, merge_size_watershed,
            max_footprint, min_size_wand, max_size_wand)

        # Save final ROIs: flattened .tif preview plus full .npz stack
        for i, roi in enumerate(final_rois):
            r = roi.max(axis=0)
            roi_name = postprocess_dir + ttv + filenames[i] + '.tif'
            tifffile.imsave(roi_name, r.astype(np.float32))
            np.savez_compressed(postprocess_dir + ttv + filenames[i] + '.npz',
                                rois=roi, roi_probabilities=final_roi_probs[i])