def prepocess():
    """Normalise each raw scan and save image/label pairs as .npy files.

    Valid voxels (>= 0) get a log transform followed by a 3.5-sigma
    window rescale; background voxels (< 0) are reset to -1.  Label
    class 2 is merged into class 1.  Output names drop the original
    7-character suffix (presumably '.nii.gz' — TODO confirm).
    """
    image_paths = get_files(PRE_IMAGE_PATH)
    label_paths = get_files(PRE_LABEL_PATH)
    names = get_files(PRE_IMAGE_PATH, prefix=False)

    def normlize_data(image):
        # Negative voxels mark background; only valid tissue is rescaled.
        valid = image >= 0
        background = image < 0
        image[valid] = np.log(image[valid] + 1)
        mean, std = image[valid].mean(), image[valid].std()
        low, high = mean - 3.5 * std, mean + 3.5 * std
        image = (image - low) / (high - low)
        image[background] = -1
        return image

    for img_path, lbl_path, name in tqdm(zip(image_paths, label_paths, names)):
        img = normlize_data(load_image(img_path).get_data())
        lbl = load_image(lbl_path).get_data()
        # Collapse class 2 into class 1 (binary segmentation target).
        lbl[lbl == 2] = 1
        stem = name[:-7]
        np.save(IMAGE_PATH + stem + '.npy', img)
        np.save(LABEL_PATH + stem + '.npy', lbl)
def predict_test_data():
    """Rebuild model settings from checkpoint file names and run inference.

    Checkpoint names are '_'-separated; the fields used here are
    index 0 (model name), index 2 (axis tag or 'None') and index 3
    (post-process tag: 'ostu' or 'around').

    Raises:
        NameError: if the post-process tag is not recognised.
    """
    models = []
    for fname in get_files(MODEL_PATH, prefix=False):
        parts = fname.split('_')
        setting = {
            'modelname': parts[0],
            'axis': None if parts[2] == 'None' else parts[2],
            'loss': diceLoss,
        }
        if parts[3] == 'ostu':
            setting['postPocess'] = ostu
        elif parts[3] == 'around':
            setting['postPocess'] = threshold_filter
        else:
            raise NameError("postPocess error")
        setting['path'] = MODEL_PATH + fname
        models.append(setting)
    seg_recovery(get_files(TEST_DATA, prefix=False), models)
def EDA_warp():
    """Run EDA() once per raw-image shape group.

    The original version repeated the same scan three times, reloading
    every image for each target shape; this version loads each image
    once, buckets the file paths by shape, and then issues the same
    three EDA() calls in the same order.
    """
    files = get_files(PRE_IMAGE_PATH, prefix=False)
    # Target shape -> EDA tag; insertion order fixes the call order.
    groups = {
        (256, 256, 180, 1): '180',
        (256, 256, 166, 1): '256_166',
        (192, 192, 160, 1): '192',
    }
    buckets = {shape: ([], []) for shape in groups}  # shape -> (labels, images)
    for name in files:
        im = load_image(PRE_IMAGE_PATH + name).get_data()
        if im.shape in buckets:
            labels, images = buckets[im.shape]
            labels.append(PRE_LABEL_PATH + name)
            images.append(PRE_IMAGE_PATH + name)
    for shape, tag in groups.items():
        labels, images = buckets[shape]
        EDA(labels, images, tag)
def prepocess():
    """Convert raw image/label pairs to .npy files without normalisation.

    NOTE(review): this redefines ``prepocess`` from earlier in the file;
    at import time this later definition wins.  Unlike the earlier
    version, intensities are saved untouched — only label class 2 is
    merged into class 1.
    """
    image_paths = get_files(PRE_IMAGE_PATH)
    label_paths = get_files(PRE_LABEL_PATH)
    names = get_files(PRE_IMAGE_PATH, prefix=False)
    for img_path, lbl_path, name in tqdm(zip(image_paths, label_paths, names)):
        img = load_image(img_path).get_data()
        lbl = load_image(lbl_path).get_data()
        # Collapse class 2 into class 1 (binary segmentation target).
        lbl[lbl == 2] = 1
        # Drop the 7-char extension (presumably '.nii.gz' — TODO confirm).
        stem = name[:-7]
        np.save(IMAGE_PATH + stem + '.npy', img)
        np.save(LABEL_PATH + stem + '.npy', lbl)
def test():
    """Print sorted voxel counts of label classes 1 and 2 over all labels."""
    class1_counts, class2_counts = [], []
    for path in get_files(PRE_LABEL_PATH):
        lbl = load_image(path).get_data()
        class1_counts.append(np.sum(lbl == 1))
        class2_counts.append(np.sum(lbl == 2))
    print(sorted(class1_counts))
    print(sorted(class2_counts))
def get_shape():
    """Record each raw image's slice count (axis 2) into shape.json.

    Fixes: the original printed ``im.__array__`` — the bound method
    object itself, not the data — which was a debug leftover and is
    removed here; the loop variable ``f`` was also reused as the open
    file handle, which is now a distinct name.
    """
    names = get_files(PRE_IMAGE_PATH, prefix=False)
    im_shapes = {}
    for name in names:
        im = load_image(PRE_IMAGE_PATH + name).get_data()
        # Only the depth (third dimension) is recorded, as a string.
        im_shapes[name] = str(im.shape[2])
    with open(INFO + 'shape.json', 'w') as fp:
        fp.write(json.dumps(im_shapes, indent=4, separators=(',', ': ')))
def main():
    """Train a segmentation model on the first fold of a K-fold split.

    The ``break`` deliberately stops after fold 0.
    """
    # sklearn.cross_validation was removed in scikit-learn 0.20;
    # use the model_selection API instead.
    from sklearn.model_selection import KFold
    # np.asarray enables fancy indexing with the fold index arrays.
    files = np.asarray(get_files(LABEL_PATH, prefix=False))
    kf = KFold(n_splits=K, shuffle=True)
    for i, (train_index, valid_index) in enumerate(kf.split(files)):
        print('第{}次训练...'.format(i))
        trainset = files[train_index]
        validset = files[valid_index]
        model = segment_model(valfiles=validset)
        train_model(model, trainset, batchsize=3)
        break  # only the first fold is trained
def main(loss, modelname='Unet', axis=None, metric=dice_metric,
         postPocess=None, postPocess_str=None):
    """Run K-fold (K=10) cross-validated training for one configuration.

    NOTE(review): this redefines ``main`` from earlier in the file; the
    later definition wins at import time.

    Args:
        loss: loss function handed to segment_model.
        modelname: architecture tag; first field of the model id.
        axis: slicing-axis tag, or None.
        metric: evaluation metric for segment_model.
        postPocess: post-processing callable for segment_model.
        postPocess_str: post-processing tag embedded in the model id.
    """
    # sklearn.cross_validation was removed in scikit-learn 0.20.
    from sklearn.model_selection import KFold
    # np.asarray enables fancy indexing with the fold index arrays.
    files = np.asarray(get_files(LABEL_PATH, prefix=False))
    K = 10
    kf = KFold(n_splits=K, shuffle=True)
    for i, (train_index, valid_index) in enumerate(kf.split(files)):
        print('第{}次训练...'.format(i))
        trainset = files[train_index]
        validset = files[valid_index]
        model = segment_model(valfiles=validset, modelname=modelname,
                              axis=axis, metric=metric, loss=loss,
                              postPocess=postPocess)
        # BUG FIX: the original axis branch used ``str(i) * 2`` — string
        # repetition, e.g. '11' for fold 1 — inconsistent with the
        # axis-is-None branch.  The fold index now appears exactly once.
        axis_tag = 'None' if axis is None else axis
        model_id = '{}_{}_{}_{}_'.format(modelname, i, axis_tag,
                                         postPocess_str)
        best_score = train_model(model, trainset, batchsize=8,
                                 model_name=model_id, axis=axis)
        print(str(i), ': ', best_score)
def run(self):
    """Rename every allowed file under the configured folders.

    Walks ``self.main.folders``, filters each file through the
    allow/ignore check, builds its new name via
    ``self.main.get_new_name``, performs the rename, and streams
    HTML-formatted progress messages through ``self.outSignal``.
    Ends with a summary table and closes the log file if one is open.
    """
    # Per-outcome counters for the final summary table.
    done = 0
    fail = 0
    ignored = 0
    not_allowed = 0
    # Running count of files actually processed, across all folders.
    total_on_folders = 0
    for folder in self.main.folders:
        files = get_files(folder)
        # Mode 3 applies a custom ordering driven by the 'i3'/'i4' args.
        if self.main.code == 3:
            files = self.main.sort_file_list(folder, files,
                                             self.main.arg['i3'],
                                             self.main.arg['i4'])
        total_on_a_folder = 0  # per-folder counter fed to get_new_name
        for a_file in files:
            filename, ext = get_filename_extension(a_file)
            # status: 1 = ignored, 2 = not allowed, anything else = rename.
            status = self.main.check_for_allow_and_ignore(
                a_file, filename, ext)
            if status == 1:
                ignored += 1
                self.outSignal.emit(
                    f"<b>[+] - Ignored :</b> {join(folder, a_file)}")
            elif status == 2:
                not_allowed += 1
                self.outSignal.emit(
                    f"<b>[+] - Not Allowed :</b> {join(folder, a_file)}")
            else:
                total_on_a_folder += 1
                total_on_folders += 1
                try:
                    new_name = self.main.get_new_name(
                        a_file, filename, ext, folder, total_on_a_folder,
                        total_on_folders)
                except Exception as e:
                    # Name generation failed: report and log the failure.
                    # NOTE(review): this `break` exits only the inner loop,
                    # so despite the "Stopping Renaming" message, renaming
                    # continues with the next folder.
                    print(e)
                    self.outSignal.emit(
                        f"<b>[+] - Error :</b> {join(folder, a_file)} <b>>>></b> Error on making new name"
                    )
                    self.outSignal.emit(f"<b>[+] - Stopping Renaming</b>")
                    self.main.do_log(False, join(folder, a_file), '')
                    break
                if rename_file(folder, a_file, new_name):
                    done += 1
                    self.outSignal.emit(
                        f"<b>[+] - Done :</b> {join(folder, a_file)} <b>>></b> {new_name}"
                    )
                    self.main.do_log(True, join(folder, a_file), new_name)
                else:
                    fail += 1
                    self.main.do_log(False, join(folder, a_file), new_name)
                    self.outSignal.emit(
                        f"<b>[+] - Failed :</b> {join(folder, a_file)} <b>>></b> {new_name}"
                    )
    # Final summary block (blank lines separate sections in the UI).
    self.outSignal.emit("")
    self.outSignal.emit(f"<b>[+] - Renaming Finished</b>")
    self.outSignal.emit("")
    total = done + fail + ignored + not_allowed
    self.outSignal.emit(
        f"<table> <tr> <td><b>[+] - Total</b></td> <td>:</td> <td>{total}</td> </tr> <tr> <td><b>[+] - Renamed</b></td> <td>:</td> <td>{done}</td> </tr> <tr> <td><b>[+] - Failed</b></td> <td>:</td> <td>{fail}</td> </tr> <tr> <td><b>[+] - Not Allowed</b></td> <td>:</td> <td>{not_allowed}</td> </tr> <tr> <td><b>[+] - Ignored</b></td> <td>:</td> <td>{ignored}</td> </tr> </table>"
    )
    self.outSignal.emit("")
    # Close the log file only when logging is enabled and a file was opened.
    if self.main.log_file is not None and self.main.make_log:
        self.main.log_file.close()
    if self.main.make_log_error:
        self.outSignal.emit(f"[+] - <b>Log Failed</b>")
    else:
        self.outSignal.emit(
            f"[+] - Log path : <b>{str(Path('logs.txt').absolute())}</b>"
        )
    self.outSignal.emit(f"[+] - Press Finish to quit")
def get_images_info():
    """Collect per-image coordinate/intensity extents and dump to JSON.

    For every image, records [min, max] of the x/y/z coordinates and the
    (int-truncated) voxel intensity for three voxel groups: label 1
    ('h1'), label 2 ('h2'), and remaining non-background voxels
    ('brain', where intensity != -1).  Derived ranges are then appended
    and everything is written to INFO + 'image_info.json'.

    Fixes: the original scanned every voxel with a triple Python loop
    (tens of millions of iterations per scan); the inner ``get_info`` is
    now vectorised with boolean masks.  Results are identical: min/max
    over per-voxel ``int()`` values equals ``int()`` of the float
    min/max, because truncation toward zero is monotone.
    """

    def get_info(image, label):
        # image/label are 4-D arrays with a trailing singleton channel
        # (shape (H, W, D, 1) after the transpose below).
        info = {
            'brain_x': [256, 0],
            'brain_y': [256, 0],
            'brain_z': [256, 0],
            'brain_voxel': [10000, -10000],
            'h1_x': [256, 0],
            'h1_y': [256, 0],
            'h1_z': [256, 0],
            'h1_voxel': [10000, -10000],
            'h2_x': [256, 0],
            'h2_y': [256, 0],
            'h2_z': [256, 0],
            'h2_voxel': [10000, -10000],
        }
        info['shape'] = image.shape
        img = image[..., 0]
        lbl = label[..., 0]
        # Same precedence as the original if/elif chain: label 1 wins,
        # then label 2, then any remaining voxel with intensity != -1.
        masks = {
            'h1': lbl == 1,
            'h2': lbl == 2,
            'brain': (lbl != 1) & (lbl != 2) & (img != -1),
        }
        for target, mask in masks.items():
            if not mask.any():
                continue  # empty group keeps its sentinel [min, max]
            xs, ys, zs = np.nonzero(mask)
            vals = img[mask]
            for tag, arr in zip(('x', 'y', 'z', 'voxel'),
                                (xs, ys, zs, vals)):
                key = target + '_' + tag
                # int() casts keep the JSON output plain Python ints.
                lo, hi = int(arr.min()), int(arr.max())
                if lo < info[key][0]:
                    info[key][0] = lo
                if hi > info[key][1]:
                    info[key][1] = hi
        return info

    images_info = {}
    files = get_files(PRE_IMAGE_PATH, prefix=False)
    image_files = [PRE_IMAGE_PATH + f for f in files]
    label_files = [PRE_LABEL_PATH + f for f in files]
    for l_f, im_f, name in tqdm(zip(label_files, image_files, files)):
        label = sitk.GetArrayFromImage(sitk.ReadImage(l_f))
        image = sitk.GetArrayFromImage(sitk.ReadImage(im_f))
        # SimpleITK yields (D, H, W); reorder to (H, W, D) and add a
        # trailing channel axis, matching the original layout.
        label = label.transpose((1, 2, 0))[:, :, :, np.newaxis]
        image = image.transpose((1, 2, 0))[:, :, :, np.newaxis]
        images_info[name] = get_info(image, label)

    variables = [
        'brain_x', 'brain_y', 'brain_z', 'brain_voxel',
        'h1_x', 'h1_y', 'h1_z', 'h1_voxel',
        'h2_x', 'h2_y', 'h2_z', 'h2_voxel',
    ]
    for file in images_info.keys():
        # Span (max - min) for every recorded extent.
        for vari in variables:
            images_info[file][vari + '_range'] = \
                images_info[file][vari][1] - images_info[file][vari][0]
    for file in images_info.keys():
        # Combined z-span from the bottom of h1 to the top of h2.
        images_info[file]['h_z_range'] = \
            images_info[file]['h2_z'][1] - images_info[file]['h1_z'][0]
    for file in images_info.keys():
        # Extents re-expressed relative to the brain's minimum coordinate.
        for axis in ['x', 'y', 'z']:
            for h in ['h1_', 'h2_']:
                base = images_info[file]['brain_' + axis][0]
                images_info[file]['effec_' + h + axis] = [
                    images_info[file][h + axis][0] - base,
                    images_info[file][h + axis][1] - base,
                ]
    with open(INFO + 'image_info.json', 'w') as f:
        f.write(json.dumps(images_info, indent=4, separators=(',', ': ')))