    def __init__(self, file_name, mask_files, file_type, patch_type, auto_save_patch=True):
        self._cfg = config_fun.config()
        self._file_name = file_name
        self._mask_files = mask_files
        self._auto_save_patch = auto_save_patch
        self._file_type = file_type    # 'train' or 'val'
        self._patch_type = patch_type  # 'positive' or 'negative'

        # class-label offsets: negative classes start at 3, positive classes follow them
        self._neg_start_idx = 3
        self._pos_start_idx = self._neg_start_idx + self._cfg.num_neg_classes  # e.g. 3 + 1 = 4

        self._img = slide_fun.AllSlide(self._file_name)

        # merged mask size at the rescaled resolution (level-0 dimensions / rescaled_times)
        self._merged_mask_size = np.ceil(
            np.array(self._img.level_dimensions[0]) / self._cfg.rescaled_times).astype(int)
        self._merged_mask = None
        self._merged_mask_level = None

        self._final_level = self._get_final_level(self._cfg.target_level)
        self._rescaled_times = self._get_rescaled_times_of_final_level(self._final_level)
        print('level %d rescaled times compared with level 0: %d'
              % (self._final_level, self._rescaled_times))
        self._times_target_level_divide_rescaled_mask = int(self._cfg.rescaled_times / self._rescaled_times)

        self._min_patch_size = int(self._cfg.patch_size / self._times_target_level_divide_rescaled_mask)
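The derived sizes above come down to a couple of integer divisions against the slide's level-0 dimensions. A minimal worked sketch with made-up numbers; the slide size, rescaled_times, patch_size, and the chosen pyramid level below are illustrative assumptions, not the project's real config values:

import numpy as np

level0_dims = np.array([100_000, 80_000])    # stand-in for slide.level_dimensions[0]
rescaled_times = 128                         # stand-in for cfg.rescaled_times
patch_size = 256                             # stand-in for cfg.patch_size

merged_mask_size = np.ceil(level0_dims / rescaled_times).astype(int)
print(merged_mask_size)                      # [782 625]

# if the chosen pyramid level is 32x smaller than level 0:
level_rescaled_times = 32
times_target_level_divide_rescaled_mask = int(rescaled_times / level_rescaled_times)  # 4
min_patch_size = int(patch_size / times_target_level_divide_rescaled_mask)            # 64
print(times_target_level_divide_rescaled_mask, min_patch_size)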
    def __init__(self, data_type='train', frac=1, file_name=None):
        cfg = config_fun.config()
        self._data_type = data_type
        self._compose = patch_preprocess_fun.get_train_val_compose()
        if file_name is None:
            # load all patches for this split, optionally subsampled by frac
            self._data, self._label, self._name = hdf5_fun.get_all_data_label_name(
                cfg, data_type=data_type, frac=frac)
        else:
            # load patches from a single, explicitly named HDF5 file
            self._data, self._label, self._name = hdf5_fun.h5_extract_data_label_name(
                cfg.patch_size, file_name)
        assert self._data.shape[0] == self._label.shape[0]
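An __init__ like this one typically belongs to a torch.utils.data.Dataset that is then wrapped in a DataLoader. A minimal self-contained sketch of that pattern; the class name _SketchPatchDataset and the __len__/__getitem__ bodies are hypothetical, only the shape assertion mirrors the code above:

import torch
from torch.utils.data import Dataset, DataLoader

class _SketchPatchDataset(Dataset):              # hypothetical stand-in class
    def __init__(self, data, label):
        self._data = data
        self._label = label
        assert self._data.shape[0] == self._label.shape[0]

    def __len__(self):
        return self._data.shape[0]

    def __getitem__(self, idx):
        return self._data[idx], self._label[idx]

fake = _SketchPatchDataset(torch.zeros(8, 4, 4, 3), torch.zeros(8, dtype=torch.long))
loader = DataLoader(fake, batch_size=4, shuffle=True)
for images, labels in loader:
    print(images.shape, labels.shape)            # torch.Size([4, 4, 4, 3]) torch.Size([4])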
Example No. 3
def get_gm_compose(input_size=None):
    """Deterministic resize + tensor + normalize pipeline built from the
    configured input size and dataset mean/std."""
    cfg = config_fun.config()
    if input_size is None:
        input_size = get_input_size(cfg)
    info = get_mean_std(cfg)
    normalize = transforms.Normalize(mean=info['mean'], std=info['std'])
    compose = transforms.Compose([
        transforms.Resize(input_size),
        transforms.ToTensor(),
        normalize,
    ])
    return compose
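A hedged usage sketch for a compose like this, run on a single image; the input size and normalization statistics are placeholders for whatever get_input_size(cfg) and get_mean_std(cfg) return, and the image path is hypothetical:

from PIL import Image
from torchvision import transforms

compose = transforms.Compose([
    transforms.Resize((224, 224)),                   # stand-in for get_input_size(cfg)
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], # placeholder dataset statistics
                         std=[0.229, 0.224, 0.225]),
])

img = Image.open('patch.png').convert('RGB')         # hypothetical patch image
tensor = compose(img)                                # FloatTensor, shape (3, 224, 224)
batch = tensor.unsqueeze(0)                          # add batch dim -> (1, 3, 224, 224)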
Example No. 4
def get_h5_compose():
    """Augmentation pipeline for patches loaded from HDF5 (raw arrays rather
    than PIL images, hence the leading ToPILImage)."""
    cfg = config_fun.config()
    input_size = get_input_size(cfg)
    info = get_mean_std(cfg)
    normalize = transforms.Normalize(mean=info['mean'], std=info['std'])
    compose = transforms.Compose([
        transforms.ToPILImage(),
        transforms.RandomResizedCrop(input_size),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize,
    ])
    return compose
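Because of the leading ToPILImage(), this compose accepts the uint8 arrays that come out of an HDF5 file. A hedged sketch of feeding one such patch through an equivalent pipeline; the crop size and statistics are again placeholders:

import numpy as np
from torchvision import transforms

compose = transforms.Compose([
    transforms.ToPILImage(),                         # accepts an H x W x 3 uint8 array
    transforms.RandomResizedCrop(224),               # stand-in for get_input_size(cfg)
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.5, 0.5, 0.5],       # placeholder dataset statistics
                         std=[0.5, 0.5, 0.5]),
])

patch = np.random.randint(0, 256, size=(256, 256, 3), dtype=np.uint8)  # fake patch
augmented = compose(patch)                           # FloatTensor, shape (3, 224, 224)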
Example No. 5
    def __init__(self, file_name, mask_files, file_type, patch_type, auto_save_patch=True):
        self._cfg = config_fun.config()
        self._file_name = file_name
        self._mask_files = mask_files
        self._auto_save_patch = auto_save_patch
        self._file_type = file_type    # 'train' or 'val'
        self._patch_type = patch_type  # 'positive' or 'negative'

        self._neg_start_idx = 3
        self._pos_start_idx = self._neg_start_idx + self._cfg.num_neg_classes

        self._img = slide_fun.AllSlide(self._file_name)
        self._max_mask = None
        # mask size at the most downscaled resolution (level-0 dimensions / max_frac)
        self._max_mask_size = np.ceil(
            np.array(self._img.level_dimensions[0]) / self._cfg.max_frac).astype(int)
        self._max_mask_level = None

        self._min_patch_size = int(self._cfg.patch_size / self._cfg.min_frac)
Example No. 6
def convert_patch_to_hdf5():
    cfg = config_fun.config()
    train_pos_patches = glob.glob(
        os.path.join(cfg.patch_save_folder, 'train', 'pos', '*' + cfg.img_ext))
    train_neg_patches = glob.glob(
        os.path.join(cfg.patch_save_folder, 'train', 'neg', '*' + cfg.img_ext))
    val_pos_patches = glob.glob(
        os.path.join(cfg.patch_save_folder, 'val', 'pos', '*' + cfg.img_ext))
    val_neg_patches = glob.glob(
        os.path.join(cfg.patch_save_folder, 'val', 'neg', '*' + cfg.img_ext))

    train_patches = _add_label(train_pos_patches, train_neg_patches)
    val_patches = _add_label(val_pos_patches, val_neg_patches)
    print('processing train patches~')
    _precoss_patches(cfg, train_patches, 'train')
    print('processing validation patches~')
    _precoss_patches(cfg, val_patches, 'val')
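_add_label() and _precoss_patches() are defined elsewhere in the project. Below is a hedged sketch of what writing labelled patches into HDF5 might look like with h5py; the dataset names ('data', 'label', 'name'), the (path, label) pair format, and the function name write_patches_to_hdf5 are assumptions, not the project's actual implementation:

import h5py
import numpy as np
from PIL import Image

def write_patches_to_hdf5(patches, h5_path, patch_size):
    """patches: list of (image_path, label) pairs (assumed format)."""
    with h5py.File(h5_path, 'w') as f:
        n = len(patches)
        data = f.create_dataset('data', (n, patch_size, patch_size, 3), dtype='uint8')
        label = f.create_dataset('label', (n,), dtype='int64')
        name = f.create_dataset('name', (n,), dtype=h5py.string_dtype())
        for i, (path, lab) in enumerate(patches):
            img = Image.open(path).convert('RGB').resize((patch_size, patch_size))
            data[i] = np.asarray(img)
            label[i] = lab
            name[i] = path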
Example No. 7
def generate_patch(auto_save_patch=True):
    cfg = config_fun.config()
    with open(cfg.split_file) as f:
        split_data = json.load(f)

    train_data = filter(lambda item: item['info'] in ('train_tumor', 'train_normal'),
                        split_data)
    val_data = filter(lambda item: item['info'] in ('val_tumor', 'val_normal'),
                      split_data)
    # test_data = filter(lambda item: item['info'] in ('test_tumor', 'test_normal'),
    #                    split_data)

    _prepare_data(cfg, train_data, 'train', auto_save_patch=auto_save_patch)
    _prepare_data(cfg, val_data, 'val', auto_save_patch=auto_save_patch)

    train_patch = get_coors(cfg, 'train')
    val_patch = get_coors(cfg, 'val')

    print('train patches: %d' % len(train_patch))
    print('val patches: %d' % len(val_patch))
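A hedged sketch of the split file generate_patch() reads: a JSON list of records whose 'info' field tags the split. Only 'info' is visible in the code above, so the 'filename' key is a hypothetical example of whatever other fields such a record carries:

import json

split_data = [
    {'info': 'train_tumor',  'filename': 'tumor_001.tif'},    # 'filename' is hypothetical
    {'info': 'train_normal', 'filename': 'normal_001.tif'},
    {'info': 'val_tumor',    'filename': 'tumor_101.tif'},
    {'info': 'val_normal',   'filename': 'normal_101.tif'},
]

with open('split.json', 'w') as f:    # cfg.split_file would point at a file like this
    json.dump(split_data, f, indent=2)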
Example No. 8
def random_vis_hdf5():
    cfg = config_fun.config()
    print('vis train hdf5 ~')
    _random_vis_hdf5(cfg, 'train')
    print('vis validation hdf5 ~')
    _random_vis_hdf5(cfg, 'val')
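_random_vis_hdf5() is defined elsewhere; a hedged sketch of the kind of spot check it likely performs, assuming the HDF5 file holds 'data' and 'label' datasets (both names are assumptions):

import h5py
import numpy as np
import matplotlib.pyplot as plt

def vis_random_patches(h5_path, n=4):
    """Show n randomly chosen patches with their labels."""
    with h5py.File(h5_path, 'r') as f:
        idx = np.random.choice(f['data'].shape[0], size=n, replace=False)
        fig, axes = plt.subplots(1, n, figsize=(3 * n, 3))
        for ax, i in zip(axes, sorted(idx)):
            ax.imshow(f['data'][i])                  # H x W x 3 uint8 patch
            ax.set_title('label: %d' % f['label'][i])
            ax.axis('off')
        plt.show()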