Example #1
def samples_replace(
        source_path="/data/fyl/data_samples/tianchild_cubes_overbound/npy_non_fine",
        target_path="/data/fyl/data_samples/tianchild_cubes_overbound/npy_non"
):
    '''
	filelist = glob.glob(source_path+'/*.npy')
	for file in filelist:
		filename = os.path.basename(file)
		target_file = target_path + '/' + filename
		shutil.copyfile(file, target_file)
		print(target_file)
	'''
    bt.directory_arrange(source_path, target_path, mode='move')
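# A minimal, hypothetical sketch of what bt.directory_arrange(mode='move') does
# here, assuming it mirrors the commented-out copy loop above but relocates the
# files instead; the real helper's signature and behavior may differ.
import glob
import os
import shutil

def directory_arrange_sketch(source_path, target_path, mode='copy'):
    for file in glob.glob(os.path.join(source_path, '*.npy')):
        target_file = os.path.join(target_path, os.path.basename(file))
        if mode == 'move':
            shutil.move(file, target_file)  # relocate, as mode='move' suggests
        else:
            shutil.copyfile(file, target_file)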
Example #2
def __init__(self,
             filelist,
             data_size=64,
             mode=None,
             file_retrieval=False):
    # filelist may be either a path to a list file or an in-memory list
    if isinstance(filelist, str):
        self.filelist = bt.filelist_load(filelist)
    elif isinstance(filelist, list):
        self.filelist = filelist
    else:
        print('invalid filelist type:{}'.format(type(filelist)))
        exit()
    # infer the dataset name from the first sample path; note there is no
    # default branch, so an unrecognized path leaves dataset_name unset
    if 'lidc' in self.filelist[0]:
        self.dataset_name = 'LIDC'
    elif 'sph' in self.filelist[0]:
        self.dataset_name = 'SPH'
    elif 'luna' in self.filelist[0]:
        self.dataset_name = 'LUNA'
    elif 'tianchild' in self.filelist[0]:
        self.dataset_name = 'TLD'
    self.data_size = data_size
    self.mode = mode
    self.file_retrieval = file_retrieval
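# Hypothetical usage of this loader (the enclosing class name is not shown in
# this fragment; 'CubeDataset' is a placeholder):
# dataset = CubeDataset('filelist_train.log', data_size=64, mode='train')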
Example #3
os.makedirs(net_store_path)
if os.access(tensorboard_path, os.F_OK):
    shutil.rmtree(tensorboard_path)
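# note: os.access(path, os.F_OK) is used throughout these snippets as an
# existence check; os.path.exists(path) is the more idiomatic equivalent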

#data arrangement
luna_dir = "../data_samples/luna_cubes_56_overbound"
#data_dir2 = "../data_samples/tianchi_cubes_56_overbound"
slh_dir = "../data_samples/slh_cubes_56_overbound"
luna_trainsets = [
    "subset0", "subset1", "subset2", "subset3", "subset4", "subset5",
    "subset6", "subset7", "subset8"
]
luna_valsets = ["subset9"]
tianchi_trainsets = ["train", "val"]
#train_sets = ["subset9"]
'''
if "pfilelist_path" in dir() and os.path.exists(pfilelist_path):
	print("read pfilelist from: %s" %(pfilelist_path))
	pfiles = bt.filelist_load(pfilelist_path)
	#pfilelist_file = open(pfilelist_path, "r")
	#pfiles = pfilelist_file.readlines()
	#for pfi in range(len(pfiles)):
	#	pfiles[pfi] = pfiles[pfi][:-1]
	#pfilelist_file.close()
else:
	pfiles = []
	for set in train_sets:
		train_dir = os.path.join(data_dir1, set)
		pdir = os.path.join(train_dir,"npy","*.npy")
		pfiles.extend(glob.glob(pdir))
		#ndir = os.path.join(train_dir,"npy_non","*.npy")
Example #4
MAX_BOUND = float(config["MAX_BOUND"])
MIN_BOUND = float(config["MIN_BOUND"])
PIXEL_MEAN = float(config["PIXEL_MEAN"])
NORM_CROP = config["NORM_CROP"]

REGION_SIZE = 64
BATCH_SIZE = 10

net_init_path = "models_tensorflow/lidc_3D_multi_crop_net_64_aug2"
net_init_files = [
    net_init_path + "/epoch7/epoch7", net_init_path + "/epoch9/epoch9",
    net_init_path + "/epoch10/epoch10"
]
test_filelist_path = net_init_path + "/filelist_val.log"
test_files = bt.filelist_load(test_filelist_path)
test_num = len(test_files)

#net construct
volume_input = tf.placeholder(tf.float32,
                              [None, REGION_SIZE, REGION_SIZE, REGION_SIZE])
volume_reshape = tf.reshape(volume_input,
                            [-1, REGION_SIZE, REGION_SIZE, REGION_SIZE, 1])
real_label = tf.placeholder(tf.float32, [None, 2])
net_outs1, variables1 = tmc.multi_crop_net(volume_reshape, poolings=[1, 1, 2])
net_outs2, variables2 = tmc.multi_crop_net(volume_reshape, poolings=[1, 1, 2])
net_outs3, variables3 = tmc.multi_crop_net(volume_reshape, poolings=[1, 1, 2])
prediction_fusion = net_outs1['sm_out'] + net_outs2['sm_out'] + net_outs3['sm_out']
correct_prediction = tf.equal(tf.argmax(prediction_fusion, 1),
                              tf.argmax(real_label, 1))
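# The fragment stops before the accuracy op; the usual TF1 continuation (an
# assumption, not shown in this source) would be:
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))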
Example #5
from tensorflow.python import debug as tf_debug
import tensorflow as tf
import numpy as np
import os
import sys
import shutil
import glob
import math
import random
sys.path.append("/home/fyl/programs/lung_project")
from toolbox import BasicTools as bt
from toolbox import MITools as mt
from toolbox import Data_Augmentation as da
from toolbox import TensorflowTools as tft

constants = bt.read_constants("./constants2.txt")
os.environ['CUDA_VISIBLE_DEVICES'] = constants["GPU_DEVICE"]
REGION_SIZE = constants["REGION_SIZE"]
MAX_BOUND = float(constants["MAX_BOUND"])
MIN_BOUND = float(constants["MIN_BOUND"])
PIXEL_MEAN = float(constants["PIXEL_MEAN"])

#NUM_EPOCH = 200
SNAPSHOT_EPOCH = 20
#DECAY_EPOCH = 0
#INITIAL_LEARNING_RATE = 0.001
#DECAY_LEARNING_RATE = 1.0

BATCH_SIZE = 30
VALIDATION_RATE = 0.2
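# These bounds are typically used for LUNA16-style HU normalization; a sketch of
# the presumed preprocessing (an assumption, not shown in this fragment):
def normalize_sketch(image):
    image = (image - MIN_BOUND) / (MAX_BOUND - MIN_BOUND)
    image = np.clip(image, 0.0, 1.0)
    return image - PIXEL_MEAN  # zero-center with the dataset pixel mean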
Example #6
    label = label_dict[opt.label_mode]
    use_gpu = opt.use_gpu
    net_file = opt.load_model_path

    if "test_paths" in dir():
        all_patients = []
        for path in test_paths:
            all_patients += glob(path + "/*.mhd")
        if len(all_patients) <= 0:
            print("No patient found")
            exit()
    else:
        print("No test data")
        exit()
    if hasattr(opt, 'filelists') and 'test' in opt.filelists.keys():
        test_samples = bt.filelist_load(opt.filelists['test'])
        test_uids = []
        for test_sample in test_samples:
            sample_uid = os.path.basename(test_sample).split('_')[0]
            if sample_uid not in test_uids:
                test_uids.append(sample_uid)
    #else:
    #	for path in opt.filelists['test']:
    #		test_samples = glob(path + '/*.mhd')
    if 'vision_path' in dir() and vision_path is not None and not os.access(
            vision_path, os.F_OK):
        os.makedirs(vision_path)
    if os.access(evaluation_path, os.F_OK): shutil.rmtree(evaluation_path)
    if not os.access(evaluation_path, os.F_OK): os.makedirs(evaluation_path)

    #model = models.DensecropNet(input_size=region_size, drop_rate=0, growth_rate=64, num_blocks=4, num_fin_growth=3).eval()
Example #7
def get_pathology_filelists(datasetidx=''):
    root_name = 'root_dir'
    if datasetidx is not None: root_name += str(datasetidx)
    if hasattr(opt, root_name) and getattr(opt, root_name) is not None:
        #filelists, folddict = lt.filelist_training(opt.root_dir, remove_uncertain=opt.remove_uncertain, shuffle = opt.filelist_shuffle, cross_fold = opt.num_cross_folds, val_fold = opt.val_fold, test_fold = opt.test_fold)
        file_root = getattr(opt, root_name)
        if os.path.isfile(file_root):
            filelist = bt.filelist_load(file_root)
        else:
            filelist = glob.glob(
                os.path.join(getattr(opt, root_name), "npy", "*.npy"))
        #mt.sample_statistics(filelist, True)
        if 'lidc' in getattr(opt, root_name) and hasattr(
                opt, 'remove_uncertain') and opt.remove_uncertain:
            filelist = lt.filelist_remove_uncertain(filelist)
        if opt.filelist_shuffle:
            random.shuffle(filelist)
        filelists, folddict = bt.foldlist(
            filelist, opt.num_cross_folds, {
                'val': getattr(opt, 'val_fold' + str(datasetidx)),
                'test': getattr(opt, 'test_fold' + str(datasetidx))
            })
        if 'train' in filelists.keys():
            bt.filelist_store(
                filelists['train'], save_path + '/filelist' + str(datasetidx) +
                '_train_fold' + str(folddict['train']) + '.log')
        if 'val' in filelists.keys():
            bt.filelist_store(
                filelists['val'], save_path + '/filelist' + str(datasetidx) +
                '_val_fold' + str(folddict['val']) + '.log')
        if 'test' in filelists.keys():
            bt.filelist_store(
                filelists['test'], save_path + '/filelist' + str(datasetidx) +
                '_test_fold' + str(folddict['test']) + '.log')
        #vis.log("trainfold:{} valfold:{} testfold:{}" .format(folddict['train'], folddict['val'], folddict['test']))
        print("filelist generated")
    else:
        filelists = {}
        filelists['train'] = bt.filelist_load(
            getattr(opt, 'filelists' + str(datasetidx))['train'])
        filelists['val'] = bt.filelist_load(
            getattr(opt, 'filelists' + str(datasetidx))['val'])
        bt.filelist_store(filelists['train'],
                          save_path + '/filelist_train.log')
        bt.filelist_store(filelists['val'], save_path + '/filelist_val.log')
        if 'test' in getattr(opt, 'filelists' + str(datasetidx)).keys():
            filelists['test'] = bt.filelist_load(
                getattr(opt, 'filelists' + str(datasetidx))['test'])
            bt.filelist_store(filelists['test'],
                              save_path + '/filelist_test.log')
        print("filelist loaded")
    return filelists
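# A rough sketch of the split bt.foldlist appears to perform (assumption: it
# partitions the list into num_cross_folds folds, maps the named folds to
# 'val'/'test', and pools the remaining folds into 'train'):
import math

def foldlist_sketch(filelist, num_folds, named_folds):
    fold_size = int(math.ceil(len(filelist) / float(num_folds)))
    folds = [filelist[f * fold_size:(f + 1) * fold_size]
             for f in range(num_folds)]
    filelists = {name: folds[fold - 1]  # fold indices assumed 1-based
                 for name, fold in named_folds.items()}
    train_folds = [f + 1 for f in range(num_folds)
                   if f + 1 not in named_folds.values()]
    filelists['train'] = [file for f in train_folds for file in folds[f - 1]]
    folddict = dict(named_folds, train=train_folds)
    return filelists, folddict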
Example #8
        batch_normalization_statistic=False,
        bn_params=bn_params)
    prediction_out = outputs['sm_out']
    saver = tf.train.Saver()
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    saver.restore(sess, net_file)

    start_time = time.time()
    #patient_evaluations = open(evaluation_path + "/patient_evaluations.log", "w")
    results = []
    CPMs = []
    CPMs2 = []
    test_patients = all_patients[4:5]
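    # note: only a single patient (index 4) is kept here, and the loop variable
    # below is overridden by a hard-coded path, apparently debugging leftovers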
    bt.filelist_store(all_patients, evaluation_path + "/patientfilelist.log")
    #random.shuffle(test_patients)
    for p in range(len(test_patients)):
        patient = test_patients[p]
        #patient = "./LUNA16/subset9/1.3.6.1.4.1.14519.5.2.1.6279.6001.212608679077007918190529579976.mhd"
        patient = "./LUNA16/subset9/1.3.6.1.4.1.14519.5.2.1.6279.6001.102681962408431413578140925249.mhd"
        #patient = "./TIANCHI_examples/LKDS-00005.mhd"
        uid = mt.get_serie_uid(patient)
        annotations = mt.get_annotations(uid, annotation_file)
        if len(annotations) == 0:
            print('%d/%d patient %s has no annotations, ignore it.' %
                  (p + 1, len(test_patients), uid))
            #patient_evaluations.write('%d/%d patient %s has no annotations, ignore it\n' %(p+1, len(test_patients), uid))
            continue

        print('%d/%d processing patient:%s' % (p + 1, len(test_patients), uid))
Example #9
        net_init_paths[1] + "/batch_normalization_statistic.npy",
        net_init_paths[2] + "/batch_normalization_statistic_25.npy"
    ]
    #pfilelist_path = net_init_paths[0] + "/pfilelist.log"
    #nfilelist_path = net_init_paths[0] + "/nfilelist.log"
if 'pfilelist_path' not in dir():
    pfilelist_path = None
if 'nfilelist_path' not in dir():
    nfilelist_path = None

if os.access(net_store_path, os.F_OK):
    shutil.rmtree(net_store_path)
os.makedirs(net_store_path)
if os.access(tensorboard_path, os.F_OK):
    shutil.rmtree(tensorboard_path)
'''
#data arrangement
luna_dir = "luna_cubes_56_overbound"
#data_dir2 = "tianchi_cubes_56_overbound"
slh_dir = "slh_cubes_56_overbound"
luna_trainsets = ["subset0", "subset1", "subset2", "subset3", "subset4", "subset5", "subset6", "subset7", "subset8"]
luna_valsets = ["subset9"]
tianchi_trainsets = ["train", "val"]
#luna_trainsets = ["subset9"]

if "pfilelist_path" in dir() and os.path.exists(pfilelist_path):
	print("read pfilelist from: %s" %(pfilelist_path))
	pfiles = bt.filelist_load(pfilelist_path)
	#pfilelist_file = open(pfilelist_path, "r")
	#pfiles = pfilelist_file.readlines()
	#for pfi in range(len(pfiles)):
Example #10
	if "test_paths" in dir():
		all_patients = []
		for path in test_paths:
			all_patients += glob(path + "/*.mhd")
		if len(all_patients)<=0:
			print("No patient found")
			exit()
	else:
		print("No test data")
		exit()
	if "test_sample_filelist" not in dir():
		for path in test_sample_paths:
			test_samples = glob(path + '/*.npy')
	else:
		test_samples = bt.filelist_load(test_sample_filelist)
	test_uids = []
	for test_sample in test_samples:
		sample_uid = os.path.basename(test_sample).split('_')[0]
		if sample_uid not in test_uids:
			test_uids.append(sample_uid)
	if 'vision_path' in dir() and vision_path is not None and not os.access(vision_path, os.F_OK):
		os.makedirs(vision_path)
	#if os.access(evaluation_path, os.F_OK): shutil.rmtree(evaluation_path)
	if not os.access(evaluation_path, os.F_OK): os.makedirs(evaluation_path)
	shutil.copyfile(net_file, evaluation_path+'/'+net_file.split('/')[-1])

	model = models.DensecropNet(input_size=REGION_SIZE, drop_rate=0, growth_rate=64, num_blocks=4, num_fin_growth=3)
	model.load(net_file)
	model.eval()
	model.cuda()
Example #11
    shutil.rmtree(tensorboard_path)

#data arrangement
data_dir1 = "luna_cubes_56_overbound"
#data_dir2 = "tianchi_cubes_56_overbound"
data_dir3 = "slh_cubes_56_overbound"
train_sets = [
    "subset0", "subset1", "subset2", "subset3", "subset4", "subset5",
    "subset6", "subset7", "subset8"
]
train_sets2 = ["train", "val"]
#train_sets = ["subset9"]

if "pfilelist_path" in dir() and os.path.exists(pfilelist_path):
    print("read pfilelist from: %s" % (pfilelist_path))
    pfiles = bt.filelist_load(pfilelist_path)
    #pfilelist_file = open(pfilelist_path, "r")
    #pfiles = pfilelist_file.readlines()
    #for pfi in range(len(pfiles)):
    #	pfiles[pfi] = pfiles[pfi][:-1]
    #pfilelist_file.close()
else:
    pfiles = []
    for subset in train_sets:
        train_dir = os.path.join(data_dir1, subset)
        pdir = os.path.join(train_dir, "npy", "*.npy")
        pfiles.extend(glob.glob(pdir))
        #ndir = os.path.join(train_dir,"npy_non","*.npy")
        #nfiles.extend(glob.glob(ndir))
    if "data_dir2" in dir():
        for subset in train_sets2:
    fusion_file = "models_tensorflow/luna_slh_3D_fusion2/epoch6/epoch6"
    bn_files = [
        "models_tensorflow/luna_slh_3D_bndo_flbias_l5_20_aug_stage3/batch_normalization_statistic.npy",
        "models_tensorflow/luna_slh_3D_bndo_flbias_l5_30_aug2_stage2/batch_normalization_statistic.npy",
        "models_tensorflow/luna_slh_3D_bndo_flbias_l6_40_aug_stage2/batch_normalization_statistic_25.npy"
    ]
    #annotation_file = "LUNA16/csvfiles/annotations_corrected.csv"
    annotation_file = "../datasets/SPH_data/annotations.xlsx"
    #exclude_file = "LUNA16/csvfiles/annotations_excluded.csv"
    vision_path = "./detection_vision/sph"
    result_path = "./results"
    evaluation_path = result_path + "/evaluation_" + FUSION_MODE + "fusion_segmentation_sph"
    result_file = evaluation_path + "/result.csv"

    if "test_filelist" in dir() and os.access(test_filelist, os.F_OK):
        all_patients = bt.filelist_load(test_filelist)
    elif "test_paths" in dir():
        all_patients = []
        #for path in test_paths:
        #	all_patients += glob(path + "/*.mhd")
        for path in test_paths:
            all_patients.extend(bt.get_dirs(path))
        if len(all_patients) <= 0:
            print("No patient found")
            exit()
        #random.shuffle(all_patients)
        bt.filelist_store(all_patients, evaluation_path + "_filelist.log")
    else:
        print("No test data")
        exit()
    if 'vision_path' in dir() and vision_path is not None and not os.access(
Example #13
        predlist[0].append(predsplit[0])
        predlist[1].append(float(predsplit[1]))
    return predlist


def prediction_filter(predlist, lower, upper):
    filteredlist = ([], [])
    for pi in range(len(predlist[0])):
        if lower <= predlist[1][pi] <= upper:
            filteredlist[0].append(predlist[0][pi])
            filteredlist[1].append(predlist[1][pi])
    return filteredlist


strlist1 = bt.filelist_load(
    'predictions/train/DensecropNet_Iterative_detection_2_stage2_epoch4_corrpreds.log'
)
predlist1 = extract_predictions(strlist1)
filteredlist1 = prediction_filter(predlist1, 0, 0.1)
predset1 = set(filteredlist1[0])
strlist2 = bt.filelist_load(
    'predictions/train/DensecropNet_Iterative_detection_3_epoch6_corrpreds.log'
)
predlist2 = extract_predictions(strlist2)
filteredlist2 = prediction_filter(predlist2, 0, 0.1)
predset2 = set(filteredlist2[0])
strlist3 = bt.filelist_load(
    'predictions/train/DensecropNet_Iterative_detection_4_epoch4_corrpreds.log'
)
predlist3 = extract_predictions(strlist3)
filteredlist3 = prediction_filter(predlist3, 0, 0.1)
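# The fragment ends here; the pattern above suggests (an assumption) a final set
# and an intersection of the three low-confidence sets:
# predset3 = set(filteredlist3[0])
# common_preds = predset1 & predset2 & predset3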
Example #14
def get_detection_filelists(patient_uids=None,
                            filepaths=None,
                            easy_eliminate_filelist=None,
                            config={}):
    save_path = config['model_root'] + '/' + config['env']
    if easy_eliminate_filelist is not None:
        easy_eliminate_filelist = bt.filelist_load(easy_eliminate_filelist)
    if filepaths is None:
        filelists = {}
        filelists['train'] = bt.filelist_load(config['filelists']['train'])
        filelists['val'] = bt.filelist_load(config['filelists']['val'])
        if easy_eliminate_filelist is not None:
            filelists['train'] = bt.filelist_eliminate(
                filelists['train'], easy_eliminate_filelist)
            filelists['val'] = bt.filelist_eliminate(filelists['val'],
                                                     easy_eliminate_filelist)
        bt.filelist_store(filelists['train'],
                          save_path + '/filelist_train.log')
        bt.filelist_store(filelists['val'], save_path + '/filelist_val.log')
        print("filelist loaded")
    else:
        #filepaths=["/home/fyl/datasets/luna_64/train", "/home/fyl/datasets/luna_64/test", "/home/fyl/datasets/npy_non_set"]
        #filelist = []
        if patient_uids is None:
            patient_uids = []
            for filepath in filepaths:
                files = glob.glob(filepath + '/*.npy')
                #filelist.extend(files)
                for file in files:
                    filename = os.path.basename(file)
                    filenamenoext = os.path.splitext(filename)[0]
                    fileinfos = filenamenoext.split('_')
                    annolabel = fileinfos[-1]
                    patient_uid = fileinfos[0]
                    if patient_uid not in patient_uids:
                        patient_uids.append(patient_uid)
        elif isinstance(patient_uids, str):
            patient_uids = bt.filelist_load(patient_uids)
        #patient_temp = patient_uids[int(len(patient_uids)/10.0+0.5)*3:]
        #random.shuffle(patient_temp)
        #patient_uids[int(len(patient_uids)/10.0+0.5)*3:] = patient_temp
        if config['filelist_shuffle']: random.shuffle(patient_uids)
        bt.filelist_store(patient_uids, save_path + '/patientlist.log')
        patient_folds, folddict = bt.foldlist(patient_uids,
                                              config['num_cross_folds'], {
                                                  'val': config['val_fold'],
                                                  'test': config['test_fold']
                                              })

        filelist_overall = []
        filelists = {}
        for setname in patient_folds.keys():
            filelists[setname] = []
        for filelist in filepaths:
            if os.path.isfile(filelist):
                files = bt.filelist_load(filelist)
            else:
                files = os.listdir(filelist)
                for f in range(len(files)):
                    files[f] = filelist + '/' + files[f]
                #files = glob.glob(filelist+'/*.%s' %(fileext))
            filelist_overall.extend(files)
            if ('lidc' in filelist and 'remove_uncertain' in config
                    and config['remove_uncertain']):
                filelist_overall = lt.filelist_remove_uncertain(
                    filelist_overall)
        if easy_eliminate_filelist is not None:
            filelist_overall = bt.filelist_eliminate(filelist_overall,
                                                     easy_eliminate_filelist)
        if config['filelist_shuffle']: random.shuffle(filelist_overall)
        for file in filelist_overall:
            filename_split = os.path.splitext(
                os.path.basename(file))[0].split('_')
            #if 'label_choice' in config.keys() and filename_split[-1]=='annotation' and  config['label_choice']!=filename_split[2]: continue
            patient_uid = filename_split[0]
            for setname in patient_folds.keys():
                if patient_uid in patient_folds[setname]:
                    filelists[setname].append(file)
        for setname in patient_folds.keys():
            bt.filelist_store(
                filelists[setname], save_path + '/filelist_' + setname +
                '_fold' + str(folddict[setname]) + '.log')
        bt.filelist_store(filelist_overall, save_path + '/filelist.log')
        print("filelist generated")
    '''
	filelists = get_filelists_patientwise(patient_uids, filepaths, fileext='npy', config=config)
	for setname in filelists.keys():
		if easy_eliminate_filelist is not None:
			filelists[setname] = bt.filelist_eliminate(filelists[setname], easy_eliminate_filelist)
	'''
    return filelists
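# Hypothetical invocation of get_detection_filelists (the paths mirror the
# commented example inside the function; config values are placeholders):
# filelists = get_detection_filelists(
#     filepaths=["/home/fyl/datasets/luna_64/train",
#                "/home/fyl/datasets/npy_non_set"],
#     config={'model_root': '/data/fyl/models_pytorch', 'env': 'demo',
#             'filelist_shuffle': True, 'num_cross_folds': 5,
#             'val_fold': 4, 'test_fold': 5})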
Example #15
    shutil.rmtree(tensorboard_path)

#data arrangement
data_dir1 = "luna_cubes_56_overbound"
#data_dir2 = "tianchi_cubes_56_overbound"
data_dir3 = "slh_cubes_56_overbound"
train_sets = [
    "subset0", "subset1", "subset2", "subset3", "subset4", "subset5",
    "subset6", "subset7", "subset8"
]
train_sets2 = ["train", "val"]
#train_sets = ["subset9"]

if "pfilelist_path" in dir() and os.path.exists(pfilelist_path):
    print("read pfilelist from: %s" % (pfilelist_path))
    pfiles = bt.filelist_load(pfilelist_path)
    #pfilelist_file = open(pfilelist_path, "r")
    #pfiles = pfilelist_file.readlines()
    #for pfi in range(len(pfiles)):
    #	pfiles[pfi] = pfiles[pfi][:-1]
    #pfilelist_file.close()
else:
    pfiles = []
    for subset in train_sets:
        train_dir = os.path.join(data_dir1, subset)
        pdir = os.path.join(train_dir, "npy", "*.npy")
        pfiles.extend(glob.glob(pdir))
        #ndir = os.path.join(train_dir,"npy_non","*.npy")
        #nfiles.extend(glob.glob(ndir))
    if "data_dir2" in dir():
        for subset in train_sets2:
Example #16
def detection_fusion(
        test_path=None,
        result_path="./experiments_dt/evaluations_tianchild_densecropnet_31,32",
        **kwargs):
    opt.parse(kwargs)
    if test_path is None:
        test_paths = ["/data/fyl/datasets/Tianchi_Lung_Disease/train"]
    else:
        test_paths = [test_path]
    #test_sample_filelist = "/data/fyl/models_pytorch/DensecropNet_detection_test_rfold1/filelist_val_fold0.log"
    net_files = [
        "/data/fyl/models_pytorch/DensecropNet_arterio_detection_rfold1/DensecropNet_arterio_detection_rfold1_epoch2",
        "/data/fyl/models_pytorch/DensecropNet_lymphnodecal_detection_rfold1/DensecropNet_lymphnodecal_detection_rfold1_epoch2"
    ]
    annotation_file = "/data/fyl/datasets/Tianchi_Lung_Disease/chestCT_round1_annotation.csv"
    #candidate_file = "/data/fyl/datasets/Tianchi_Lung_Disease/candidate.csv"
    labels = [31, 32]
    #result_path = "./experiments_dt/evaluations_tianchild_densecropnet_fusion"
    #vision_path = result_path
    #result_file = result_path + "/result.csv"
    hard_negatives_file = result_path + "/hard_negatives.csv"

    region_size = opt.input_size
    batch_size = opt.batch_size
    use_gpu = opt.use_gpu

    if 'vision_path' in dir() and vision_path is not None and not os.access(
            vision_path, os.F_OK):
        os.makedirs(vision_path)
    #if os.access(result_path, os.F_OK): shutil.rmtree(result_path)
    if not os.access(result_path, os.F_OK): os.makedirs(result_path)

    if "test_paths" in dir():
        all_patients = []
        for path in test_paths:
            all_patients += glob(path + "/*.mhd")
        if len(all_patients) <= 0:
            print("No patient found")
            exit()
    else:
        print("No test data")
        exit()
    if hasattr(opt, 'filelists') and 'test' in opt.filelists.keys():
        test_samples = bt.filelist_load(opt.filelists['test'])
        test_uids = []
        for test_sample in test_samples:
            sample_uid = os.path.basename(test_sample).split('_')[0]
            if sample_uid not in test_uids:
                test_uids.append(sample_uid)
        pd.DataFrame(data=test_uids, columns=['series_uid']).to_csv(
            result_path + '/patients_uid.csv', index=False)
    #else:
    #	for path in opt.filelists['test']:
    #		test_samples = glob(path + '/*.mhd')

    #model = models.DensecropNet(input_size=region_size, drop_rate=0, growth_rate=64, num_blocks=4, num_fin_growth=3).eval()
    networks = [
        getattr(models, opt.model)(input_size=region_size,
                                   **opt.model_setup).eval()
        for m in range(len(net_files))
    ]
    for n in range(len(net_files)):
        networks[n].load(net_files[n])
        print('model loaded from %s' % (net_files[n]))
        shutil.copyfile(net_files[n],
                        result_path + '/' + net_files[n].split('/')[-1])
        if use_gpu: networks[n].cuda()

    start_time = time.time()
    #patient_evaluations = open(result_path + "/patient_evaluations.log", "w")
    results = []
    labeled_results = [[] for l in range(len(labels))]
    CPMs = [[] for l in range(len(labels))]
    #hard_negatives = []
    test_patients = all_patients
    #random.shuffle(test_patients)
    bt.filelist_store(test_patients, result_path + "/patientfilelist.log")
    for p in range(len(test_patients)):
        patient = test_patients[p]
        uid = mt.get_mhd_uid(patient)
        if 'test_uids' in dir() and uid not in test_uids:
            print("%d/%d patient %s not belong to test set" %
                  (p + 1, len(test_patients), uid))
            continue

        print('%d/%d processing patient:%s' % (p + 1, len(test_patients), uid))
        full_image_info = sitk.ReadImage(patient)
        full_scan = sitk.GetArrayFromImage(full_image_info)
        #GetOrigin()/GetSpacing() return [x,y,z]; reverse to the [z,y,x] array order
        origin = np.array(full_image_info.GetOrigin())[::-1]
        old_spacing = np.array(full_image_info.GetSpacing())[::-1]
        image, new_spacing = mt.resample(full_scan, old_spacing,
                                         np.array([1, 1, 1]))
        #image = np.load(patient)
        #new_spacing = np.array([1, 1, 1])
        #origin = np.array([0, 0, 0])
        print('Resample Done. time:{}s'.format(time.time() - start_time))

        candidate_results = nd.slic_candidate(image, 20, focus_area='body')
        if candidate_results is None:
            continue
        candidate_coords, candidate_labels, cluster_labels = candidate_results
        if 'vision_path' in dir() and vision_path is not None:
            np.save(vision_path + "/" + uid + "_segmask.npy", cluster_labels)
            #segresult = lc.segment_vision(image, cluster_labels)
            #np.save(vision_path + "/" + uid + "_segresult.npy", segresult)
        print('Candidate Done. time:{}s'.format(time.time() - start_time))
        print('candidate number:%d' % (len(candidate_coords)))

        candidate_predictions = nd.precise_detection_pt(
            image,
            region_size,
            candidate_coords,
            networks,
            None,
            batch_size,
            use_gpu=use_gpu,
            prediction_threshold=0.4)
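        # candidate_predictions is assumed to hold one prediction array per
        # network, indexed below by the label index l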
        labeled_predictions = []
        for l in range(len(labels)):
            label = labels[l]
            print('label: %d' % (label))
            evaluation_path = result_path + '/' + str(label)
            if not os.access(evaluation_path, os.F_OK):
                os.makedirs(evaluation_path)
            if 'annotation_file' in dir():
                annotations = mt.get_challenge_annotations(uid,
                                                           annotation_file,
                                                           label=label)
                if len(annotations) == 0:
                    print("%d/%d patient %s has no annotations, ignore it." %
                          (p + 1, len(test_patients), uid))
                    #patient_evaluations.write('%d/%d patient %s has no annotations, ignore it\n' %(p+1, len(test_patients), uid))
                    continue
                #make a real lesion visualization
                if 'vision_path' in dir() and vision_path is not None:
                    real_lesions = []
                    for annotation in annotations:
                        #real_lesion = np.int_([abs(annotation[2]-origin[0])/new_spacing[0], abs(annotation[1]-origin[1])/new_spacing[1], abs(annotation[0]-origin[2])/new_spacing[2]])
                        real_lesion = mt.coord_conversion(annotation[:3][::-1],
                                                          origin,
                                                          old_spacing,
                                                          full_scan.shape,
                                                          image.shape,
                                                          dir_array=True)
                        real_lesions.append(real_lesion)
                    annotation_vision = cvm.view_coordinates(image,
                                                             real_lesions,
                                                             window_size=10,
                                                             reverse=False,
                                                             slicewise=False,
                                                             show=False)
                    np.save(evaluation_path + "/" + uid + "_annotations.npy",
                            annotation_vision)
            positive_predictions = candidate_predictions[l] > 0
            result_predictions, result_labels = nd.predictions_map_fast(
                cluster_labels, candidate_predictions[l][positive_predictions],
                candidate_labels[positive_predictions])
            labeled_predictions.append(result_predictions)
            if 'vision_path' in dir() and vision_path is not None:
                np.save(evaluation_path + "/" + uid + "_detlabels.npy",
                        result_labels)
                np.save(evaluation_path + "/" + uid + "_detpredictions.npy",
                        result_predictions)
                #detresult = lc.segment_vision(image, result_labels)
                #np.save(evaluation_path+"/"+uid+"_detresult.npy", detresult)
            lesion_center_predictions = nd.prediction_centering_fast(
                result_predictions)
            #lesion_center_predictions, prediction_labels = nd.prediction_cluster(result_predictions)
            if 'vision_path' in dir() and vision_path is not None:
                lesions = []
                for nc in range(len(lesion_center_predictions)):
                    lesions.append(np.int_(lesion_center_predictions[nc][0:3]))
                volume_predicted = cvm.view_coordinates(result_predictions *
                                                        1000,
                                                        lesions,
                                                        window_size=10,
                                                        reverse=False,
                                                        slicewise=False,
                                                        show=False)
                np.save(evaluation_path + "/" + uid + "_prediction.npy",
                        volume_predicted)
                if 'prediction_labels' in dir():
                    prediction_cluster_vision = lc.segment_color_vision(
                        prediction_labels)
                    np.save(
                        evaluation_path + "/" + uid +
                        "_prediction_clusters.npy", prediction_cluster_vision)
            print('Detection Done. time:{}s'.format(time.time() - start_time))
            '''
			#randomly create a result for testing
			lesion_center_predictions = []
			for nc in range(10):
				lesion_center_predictions.append([random.randint(0,image.shape[0]-1), random.randint(0,image.shape[1]-1), random.randint(0,image.shape[2]-1), random.random()])
			'''
            for nc in range(len(lesion_center_predictions)):
                #the output coordinate order is [x,y,z], while the volume image order is [z,y,x]
                result = [uid]
                result.extend(
                    mt.coord_conversion(lesion_center_predictions[nc][:3],
                                        origin,
                                        old_spacing,
                                        full_scan.shape,
                                        image.shape,
                                        dir_array=False)[::-1])
                if label is not None: result.append(label)
                result.append(lesion_center_predictions[nc][3])
                #results.append(result)
                labeled_results[l].append(result)
            columns = [
                'seriesuid', 'coordX', 'coordY', 'coordZ', 'probability'
            ]
            if label is not None:
                columns.insert(4, 'class')
            result_frame = pd.DataFrame(data=labeled_results[l],
                                        columns=columns)
            result_frame.to_csv("{}/result_{}.csv".format(
                evaluation_path, label),
                                index=False,
                                float_format='%f')
            #np.save("{}/result_{}.npy"%(evaluation_path, label), np.array(results))

            if 'annotation_file' in dir():
                assessment = eva.detection_assessment(labeled_results[l],
                                                      annotation_file,
                                                      label=label)
                if assessment is None:
                    print('assessment failed')
                    #patient_evaluations.write('%d/%d patient %s assessment failed\n' %(p+1, len(test_patients), uid))
                    continue
                #num_scans, FPsperscan, sensitivities, CPMscore, FPsperscan2, sensitivities2, CPMscore2, lesions_detected = assessment
                num_scans = assessment['num_scans']
                FPsperscan, sensitivities = assessment['FROC']
                CPMscore = assessment['CPM']
                prediction_order = assessment['prediction_order']
                lesions_detected = assessment['detection_cites']
                if len(FPsperscan) <= 0 or len(sensitivities) <= 0:
                    print("No results to evaluate, continue")
                else:
                    eva.evaluation_vision(CPMs[l],
                                          num_scans,
                                          FPsperscan,
                                          sensitivities,
                                          CPMscore,
                                          lesions_detected,
                                          output_path=evaluation_path)
                #patient_evaluations.write('%d/%d patient %s CPM score:%f\n' %(p+1, len(test_patients), uid, single_assessment[6]))
                print('Evaluation Done. time:{}s'.format(time.time() -
                                                         start_time))

        labeled_predictions = np.array(labeled_predictions)
        prediction_labels = np.argmax(labeled_predictions, axis=0)
        predictions_fusion = labeled_predictions.sum(axis=0) / 4.0
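        # note: the 4.0 divisor appears hard-coded; with len(labels) prediction
        # maps one might expect labeled_predictions.shape[0] here (assumption)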
        fused_center_predictions = nd.prediction_centering_fast(
            predictions_fusion)
        if 'vision_path' in dir() and vision_path is not None:
            np.save(vision_path + "/" + uid + "_classlabels.npy",
                    prediction_labels)
        for lcp in range(len(fused_center_predictions)):
            #the output coordinate order is [x,y,z], while the volume image order is [z,y,x]
            center = fused_center_predictions[lcp]
            result = [uid]
            result.extend(
                mt.coord_conversion(center[:3],
                                    origin,
                                    old_spacing,
                                    full_scan.shape,
                                    image.shape,
                                    dir_array=False)[::-1])
            result.append(labels[prediction_labels[center[0], center[1],
                                                   center[2]]])
            result.append(center[3])
            results.append(result)
        columns = ['seriesuid', 'coordX', 'coordY', 'coordZ', 'probability']
        if label is not None:
            columns.insert(4, 'class')
        result_frame = pd.DataFrame(data=results, columns=columns)
        result_frame.to_csv(result_path + '/result.csv',
                            index=False,
                            float_format='%f')
        np.save(result_path + '/result.npy', np.array(results))

    print('Overall Detection Done')
Example #17
from toolbox import Nodule_Detection as nd
from toolbox import Evaluations as eva

try:
	from tqdm import tqdm  # long waits are not fun
except ImportError:
	print('tqdm not installed')
	tqdm = lambda x: x

'''
ENVIRONMENT_FILE = "./constants.txt"
IMG_WIDTH, IMG_HEIGHT, NUM_VIEW, MAX_BOUND, MIN_BOUND, PIXEL_MEAN = mt.read_environment(ENVIRONMENT_FILE)
WINDOW_SIZE = min(IMG_WIDTH, IMG_HEIGHT)
NUM_CHANNELS = 3
'''
constants = bt.read_constants("./constants.txt")
# REGION_SIZE = constants["REGION_SIZE"]
MAX_BOUND = float(constants["MAX_BOUND"])
MIN_BOUND = float(constants["MIN_BOUND"])
PIXEL_MEAN = constants["PIXEL_MEAN"]
REGION_SIZE = 40
CANDIDATE_BATCH = 10
RESULT_VISION = False


def precise_detection(volume, candidate_coords, candidate_labels, sess, input_tensor, output_tensor):
	region_size = np.array([REGION_SIZE, REGION_SIZE, REGION_SIZE], dtype=int)
	region_prehalf = np.int_(region_size / 2)
	volume_padded = MAX_BOUND * np.ones(
		(volume.shape[0] + region_size[0], volume.shape[1] + region_size[1], volume.shape[2] + region_size[2]),
		dtype=int)
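	# (fragment truncated) padding with MAX_BOUND presumably lets regions centered
	# near the volume border be cropped without explicit bounds checks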
Example #18
def detection(**kwargs):
    opt.parse(kwargs)
    test_paths = ["/data/fyl/datasets/Tianchi_Lung_Disease/train"]
    #test_sample_filelist = "/data/fyl/models_pytorch/DensecropNet_detection_test_rfold1/filelist_val_fold0.log"
    #net_file = "/data/fyl/models_pytorch/DensecropNet_stripe_detection_rfold1/DensecropNet_stripe_detection_rfold1_epoch27"
    annotation_file = "/data/fyl/datasets/Tianchi_Lung_Disease/chestCT_round1_annotation.csv"
    #candidate_file = "/data/fyl/datasets/Tianchi_Lung_Disease/candidate.csv"
    evaluation_path = "./experiments_dt/evaluations_tianchild_densecropnet_nodule_rfold1"
    #evaluation_path = "experiments_dt/evaluations_test"
    #vision_path = evaluation_path
    result_file = evaluation_path + "/result.csv"
    hard_negatives_file = evaluation_path + "/hard_negatives.csv"

    region_size = opt.input_size
    batch_size = opt.batch_size
    label_dict = {
        'noduleclass': 1,
        'stripeclass': 5,
        'arterioclass': 31,
        'lymphnodecalclass': 32
    }
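    # class ids apparently follow the Tianchi chest-CT annotation scheme:
    # nodule=1, stripe=5, arterial calcification=31, lymph-node calcification=32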
    label = label_dict[opt.label_mode]
    use_gpu = opt.use_gpu
    net_file = opt.load_model_path

    if 'vision_path' in dir() and vision_path is not None and not os.access(
            vision_path, os.F_OK):
        os.makedirs(vision_path)
    #if os.access(evaluation_path, os.F_OK): shutil.rmtree(evaluation_path)
    if not os.access(evaluation_path, os.F_OK): os.makedirs(evaluation_path)

    if "test_paths" in dir():
        all_patients = []
        for path in test_paths:
            all_patients += glob(path + "/*.mhd")
        if len(all_patients) <= 0:
            print("No patient found")
            exit()
    else:
        print("No test data")
        exit()
    if hasattr(opt, 'filelists') and 'test' in opt.filelists.keys():
        test_samples = bt.filelist_load(opt.filelists['test'])
        test_uids = []
        for test_sample in test_samples:
            sample_uid = os.path.basename(test_sample).split('_')[0]
            if sample_uid not in test_uids:
                test_uids.append(sample_uid)
        pd.DataFrame(data=test_uids, columns=['series_uid']).to_csv(
            evaluation_path + '/patients_uid.csv', index=False)
    #else:
    #	for path in opt.filelists['test']:
    #		test_samples = glob(path + '/*.mhd')

    #model = models.DensecropNet(input_size=region_size, drop_rate=0, growth_rate=64, num_blocks=4, num_fin_growth=3).eval()
    model = getattr(models, opt.model)(input_size=region_size,
                                       **opt.model_setup).eval()
    if net_file is not None:
        model.load(net_file)
        print('model loaded from %s' % (net_file))
        shutil.copyfile(net_file,
                        evaluation_path + '/' + net_file.split('/')[-1])
    #model.eval()
    if use_gpu: model.cuda()

    start_time = time.time()
    #patient_evaluations = open(evaluation_path + "/patient_evaluations.log", "w")
    results = []
    CPMs = []
    CPMs2 = []
    hard_negatives = []
    test_patients = all_patients
    #random.shuffle(test_patients)
    bt.filelist_store(test_patients, evaluation_path + "/patientfilelist.log")
    for p in range(len(test_patients)):
        patient = test_patients[p]
        #patient = "./LUNA16/subset9/1.3.6.1.4.1.14519.5.2.1.6279.6001.212608679077007918190529579976.mhd"
        #patient = "./LUNA16/subset9/1.3.6.1.4.1.14519.5.2.1.6279.6001.102681962408431413578140925249.mhd"
        #patient = "./TIANCHI_examples/LKDS-00005.mhd"
        uid = mt.get_mhd_uid(patient)
        if 'test_uids' in dir() and uid not in test_uids:
            print("%d/%d patient %s not belong to test set" %
                  (p + 1, len(test_patients), uid))
            continue
        #if uid!='656867':
        #	continue

        print('%d/%d processing patient:%s' % (p + 1, len(test_patients), uid))
        full_image_info = sitk.ReadImage(patient)
        full_scan = sitk.GetArrayFromImage(full_image_info)
        #GetOrigin()/GetSpacing() return [x,y,z]; reverse to the [z,y,x] array order
        origin = np.array(full_image_info.GetOrigin())[::-1]
        old_spacing = np.array(full_image_info.GetSpacing())[::-1]
        image, new_spacing = mt.resample(full_scan, old_spacing,
                                         np.array([1, 1, 1]))
        #image = np.load(patient)
        #new_spacing = np.array([1, 1, 1])
        #origin = np.array([0, 0, 0])
        print('Resample Done. time:{}s'.format(time.time() - start_time))

        if 'annotation_file' in dir():
            annotations = mt.get_challenge_annotations(uid, annotation_file,
                                                       label)
            if len(annotations) == 0:
                print("%d/%d patient %s has no annotations, ignore it." %
                      (p + 1, len(test_patients), uid))
                #patient_evaluations.write('%d/%d patient %s has no annotations, ignore it\n' %(p+1, len(test_patients), uid))
                continue
            #make a real lesion visualization
            if 'vision_path' in dir() and vision_path is not None:
                real_lesions = []
                for annotation in annotations:
                    #real_lesion = np.int_([abs(annotation[2]-origin[0])/new_spacing[0], abs(annotation[1]-origin[1])/new_spacing[1], abs(annotation[0]-origin[2])/new_spacing[2]])
                    real_lesion = mt.coord_conversion(annotation[:3][::-1],
                                                      origin,
                                                      old_spacing,
                                                      full_scan.shape,
                                                      image.shape,
                                                      dir_array=True)
                    real_lesions.append(real_lesion)
                annotation_vision = cvm.view_coordinates(image,
                                                         real_lesions,
                                                         window_size=10,
                                                         reverse=False,
                                                         slicewise=False,
                                                         show=False)
                np.save(vision_path + "/" + uid + "_annotations.npy",
                        annotation_vision)

        if 'candidate_file' in dir():
            print('Detection with given candidates:{}'.format(candidate_file))
            candidate_coords = nd.luna_candidate(image,
                                                 uid,
                                                 origin,
                                                 new_spacing,
                                                 candidate_file,
                                                 lung_segment=True,
                                                 vision_path=vision_path)
            if 'vision_path' in dir() and vision_path is not None:
                volume_candidate = cvm.view_coordinates(image,
                                                        candidate_coords,
                                                        window_size=10,
                                                        reverse=False,
                                                        slicewise=False,
                                                        show=False)
                np.save(vision_path + "/" + uid + "_candidate.npy",
                        volume_candidate)
            print('Candidate Done. time:{}s'.format(time.time() - start_time))
            print('candidate number:%d' % (len(candidate_coords)))
            candidate_predictions = nd.precise_detection_pt(
                image,
                region_size,
                candidate_coords,
                model,
                None,
                batch_size,
                use_gpu=use_gpu,
                prediction_threshold=0.4)
            positive_predictions = candidate_predictions > 0
            predicted_coords = np.delete(
                candidate_coords,
                np.logical_not(positive_predictions).nonzero()[0],
                axis=0)
            predictions = candidate_predictions[positive_predictions]
            lesion_center_predictions = nd.prediction_combine(
                predicted_coords, predictions)
            if 'vision_path' in dir() and vision_path is not None:
                volume_predicted = cvm.view_coordinates(image,
                                                        predicted_coords,
                                                        window_size=10,
                                                        reverse=False,
                                                        slicewise=False,
                                                        show=False)
                np.save(vision_path + "/" + uid + "_predicted.npy",
                        volume_predicted)
                lesions = []
                for nc in range(len(lesion_center_predictions)):
                    lesions.append(np.int_(lesion_center_predictions[nc][0:3]))
                volume_prediction = cvm.view_coordinates(image,
                                                         lesions,
                                                         window_size=10,
                                                         reverse=False,
                                                         slicewise=False,
                                                         show=False)
                np.save(vision_path + "/" + uid + "_prediction.npy",
                        volume_prediction)
        else:
            print('Detection with slic candidates')
            candidate_results = nd.slic_candidate(image, 30, focus_area='lung')
            if candidate_results is None:
                continue
            candidate_coords, candidate_labels, cluster_labels = candidate_results
            if 'vision_path' in dir() and vision_path is not None:
                np.save(vision_path + "/" + uid + "_segmask.npy",
                        cluster_labels)
                #segresult = lc.segment_vision(image, cluster_labels)
                #np.save(vision_path + "/" + uid + "_segresult.npy", segresult)
            print('Candidate Done. time:{}s'.format(time.time() - start_time))
            print('candidate number:%d' % (len(candidate_coords)))
            candidate_predictions = nd.precise_detection_pt(
                image,
                region_size,
                candidate_coords,
                model,
                None,
                batch_size,
                use_gpu=use_gpu,
                prediction_threshold=0.4)
            positive_predictions = candidate_predictions > 0
            result_predictions, result_labels = nd.predictions_map_fast(
                cluster_labels, candidate_predictions[positive_predictions],
                candidate_labels[positive_predictions])
            if 'vision_path' in dir() and vision_path is not None:
                np.save(vision_path + "/" + uid + "_detlabels.npy",
                        result_labels)
                np.save(vision_path + "/" + uid + "_detpredictions.npy",
                        result_predictions)
                #detresult = lc.segment_vision(image, result_labels)
                #np.save(vision_path+"/"+uid+"_detresult.npy", detresult)
            lesion_center_predictions = nd.prediction_centering_fast(
                result_predictions)
            #lesion_center_predictions, prediction_labels = nd.prediction_cluster(result_predictions)
            if 'vision_path' in dir() and vision_path is not None:
                lesions = []
                for nc in range(len(lesion_center_predictions)):
                    lesions.append(np.int_(lesion_center_predictions[nc][0:3]))
                volume_predicted = cvm.view_coordinates(result_predictions *
                                                        1000,
                                                        lesions,
                                                        window_size=10,
                                                        reverse=False,
                                                        slicewise=False,
                                                        show=False)
                np.save(vision_path + "/" + uid + "_prediction.npy",
                        volume_predicted)
                if 'prediction_labels' in dir():
                    prediction_cluster_vision = lc.segment_color_vision(
                        prediction_labels)
                    np.save(
                        vision_path + "/" + uid + "_prediction_clusters.npy",
                        prediction_cluster_vision)
        print('Detection Done. time:{}s'.format(time.time() - start_time))
        '''
		#randomly create a result for testing
		lesion_center_predictions = []
		for nc in range(10):
			lesion_center_predictions.append([random.randint(0,image.shape[0]-1), random.randint(0,image.shape[1]-1), random.randint(0,image.shape[2]-1), random.random()])
		'''
        if len(lesion_center_predictions) < 1000:
            print('Nodule coordinates:')
            if len(lesion_center_predictions) <= 0:
                print('none')
            for nc in range(len(lesion_center_predictions)):
                print('{} {} {} {}'.format(lesion_center_predictions[nc][0],
                                           lesion_center_predictions[nc][1],
                                           lesion_center_predictions[nc][2],
                                           lesion_center_predictions[nc][3]))
        for nc in range(len(lesion_center_predictions)):
            #the output coordinate order is [x,y,z], while the volume image order is [z,y,x]
            result = [uid]
            result.extend(
                mt.coord_conversion(lesion_center_predictions[nc][:3],
                                    origin,
                                    old_spacing,
                                    full_scan.shape,
                                    image.shape,
                                    dir_array=False)[::-1])
            if label is not None: result.append(label)
            result.append(lesion_center_predictions[nc][3])
            results.append(result)
            #results.append([uid, (lesion_center_predictions[nc][2]*new_spacing[2])+origin[2], (lesion_center_predictions[nc][1]*new_spacing[1])+origin[1], (lesion_center_predictions[nc][0]*new_spacing[0])+origin[0], lesion_center_predictions[nc][3]])
            #if len(lesion_center_predictions)<1000:
            #print('{} {} {} {}' .format(lesion_center_predictions[nc][0], lesion_center_predictions[nc][1], lesion_center_predictions[nc][2], lesion_center_predictions[nc][3]))
        columns = ['seriesuid', 'coordX', 'coordY', 'coordZ', 'probability']
        if label is not None:
            columns.insert(4, 'class')
        result_frame = pd.DataFrame(data=results, columns=columns)
        result_frame.to_csv(result_file, index=False, float_format='%.4f')
        np.save(evaluation_path + '/result.npy', np.array(results))

        if 'annotation_file' in dir():
            assessment = eva.detection_assessment(results,
                                                  annotation_file,
                                                  label=label)
            if assessment is None:
                print('assessment failed')
                #patient_evaluations.write('%d/%d patient %s assessment failed\n' %(p+1, len(test_patients), uid))
                continue
            #num_scans, FPsperscan, sensitivities, CPMscore, FPsperscan2, sensitivities2, CPMscore2, lesions_detected = assessment
            num_scans = assessment['num_scans']
            FPsperscan, sensitivities = assessment['FROC']
            CPMscore = assessment['CPM']
            prediction_order = assessment['prediction_order']
            lesions_detected = assessment['detection_cites']
            if len(FPsperscan) <= 0 or len(sensitivities) <= 0:
                print("No results to evaluate, continue")
            else:
                eva.evaluation_vision(CPMs,
                                      num_scans,
                                      FPsperscan,
                                      sensitivities,
                                      CPMscore,
                                      lesions_detected,
                                      output_path=evaluation_path)
            #patient_evaluations.write('%d/%d patient %s CPM score:%f\n' %(p+1, len(test_patients), uid, single_assessment[6]))
            print('Evaluation Done. time:{}s'.format(time.time() - start_time))

            num_positive = (lesions_detected >= 0).nonzero()[0].size
            for ndi in range(len(lesions_detected)):
                if results[prediction_order[ndi]][-1] <= 0.5 or (
                        lesions_detected[:ndi] >=
                        0).nonzero()[0].size == num_positive:
                    break
                if lesions_detected[ndi] == -1:
                    hard_negatives.append(results[prediction_order[ndi]])
            hard_negatives_frame = pd.DataFrame(data=hard_negatives,
                                                columns=columns)
            hard_negatives_frame.to_csv(hard_negatives_file,
                                        index=False,
                                        float_format='%.4f')
            print('Hard Negatives Extracted. time:{}s'.format(time.time() -
                                                              start_time))

    print('Overall Detection Done')
Example #19
#net_init_name = "lidc_3D_multi_crop_net_64_aug3"

if os.access(net_store_path, os.F_OK):
	shutil.rmtree(net_store_path)
os.makedirs(net_store_path)
if os.access(tensorboard_path, os.F_OK):
	shutil.rmtree(tensorboard_path)

#data arrangement
if 'net_init_name' in dir():
	net_init_path = load_path + "/" + net_init_name
	net_init_file = net_init_path + "/epoch12/epoch12"
	filelist_train_path = net_init_path + "/filelist_train.log"
	filelist_val_path = net_init_path + "/filelist_val.log"
	filelist_test_path = net_init_path + "/filelist_test.log"
	train_files = bt.filelist_load(filelist_train_path)
	val_files = bt.filelist_load(filelist_val_path)
	test_files = bt.filelist_load(filelist_test_path)
else:
	lidc_dir = "../data_samples/lidc_cubes_64_overbound_ipris"
	files_lists, _ = lt.filelist_training(lidc_dir, shuffle=True, cross_fold=5, test_fold=5)
	train_files = files_lists['train']
	val_files = files_lists['val']
	test_files = files_lists['test']
bt.filelist_store(train_files, net_store_path + '/' + 'filelist_train.log')
bt.filelist_store(val_files, net_store_path + '/' + 'filelist_val.log')
bt.filelist_store(test_files, net_store_path + '/' + 'filelist_test.log')

train_num = len(train_files)
val_num = len(val_files)
#train_num = 32
Example #20
def get_filelists_patientwise(patient_uids=None,
                              filelists=None,
                              fileext='npy',
                              datasetidx='',
                              config={}):
    save_path = config['model_root'] + '/' + config['env']
    if filelists is None:
        filelistdict = {}
        filelistdict['train'] = bt.filelist_load(
            config['filelists' + str(datasetidx)]['train'])
        filelistdict['val'] = bt.filelist_load(config['filelists' +
                                                      str(datasetidx)]['val'])
        bt.filelist_store(filelistdict['train'],
                          save_path + '/filelist_train.log')
        bt.filelist_store(filelistdict['val'], save_path + '/filelist_val.log')
        if 'test' in config['filelists' + str(datasetidx)].keys():
            filelistdict['test'] = bt.filelist_load(
                config['filelists' + str(datasetidx)]['test'])
            bt.filelist_store(filelistdict['test'],
                              save_path + '/filelist_test.log')
        print("filelist loaded")
    else:
        #filelists=["/home/fyl/datasets/luna_64/train", "/home/fyl/datasets/luna_64/test", "/home/fyl/datasets/npy_non_set"]
        #filelist = []
        if patient_uids is None:
            patient_uids = []
            for filelist in filelists:
                files = glob.glob(filelist + '/*.' + fileext)
                #filelist.extend(files)
                for file in files:
                    filename = os.path.basename(file)
                    filenamenoext = os.path.splitext(filename)[0]
                    fileinfos = filenamenoext.split('_')
                    patient_uid = fileinfos[0]
                    if patient_uid not in patient_uids:
                        patient_uids.append(patient_uid)
        elif isinstance(patient_uids, str):
            patient_uids = bt.filelist_load(patient_uids)
        if config['filelist_shuffle']: random.shuffle(patient_uids)
        bt.filelist_store(
            patient_uids,
            save_path + '/patientlist' + str(datasetidx) + '.log')
        #bt.filelist_store('luna16samplelist.log', filelist)
        patient_folds, folddict = bt.foldlist(
            patient_uids, config['num_cross_folds'], {
                'val': config['val_fold' + str(datasetidx)],
                'test': config['test_fold' + str(datasetidx)]
            })
        filelist_overall = []
        filelistdict = {}
        for setname in patient_folds.keys():
            filelistdict[setname] = []
            '''
			print("filelist {} generating" .format(setname))
			for patient_uid in tqdm(patient_folds[setname]):
				for filelist in filelists:
					if os.path.isfile(filelist):
						files = bt.filelist_load(filelist)
					else:
						files = glob.glob(filelist+'/%s*.%s' %(patient_uid, fileext))
					if 'lidc' in filelist and hasattr(opt, 'remove_uncertain') and opt.remove_uncertain:
						files = lt.filelist_remove_uncertain(files)
					filelist_overall.extend(files)
					filelistdict[setname].extend(files)
			bt.filelist_store(filelistdict[setname], save_path+'/filelist'+str(datasetidx)+'_'+setname+'_fold'+str(folddict[setname])+'.log')
			'''
        for filelist in filelists:
            if os.path.isfile(filelist):
                files = bt.filelist_load(filelist)
            else:
                files = glob.glob(filelist + '/*.%s' % (fileext))
            filelist_overall.extend(files)
            if ('lidc' in filelist and 'remove_uncertain' in config
                    and config['remove_uncertain']):
                filelist_overall = lt.filelist_remove_uncertain(
                    filelist_overall)
        if config['filelist_shuffle']: random.shuffle(filelist_overall)
        for file in filelist_overall:
            patient_uid = os.path.basename(file).split('_')[0]
            for setname in patient_folds.keys():
                if patient_uid in patient_folds[setname]:
                    filelistdict[setname].append(file)
        for setname in patient_folds.keys():
            bt.filelist_store(
                filelistdict[setname],
                save_path + '/filelist' + str(datasetidx) + '_' + setname +
                '_fold' + str(folddict[setname]) + '.log')
        bt.filelist_store(filelist_overall,
                          save_path + '/filelist' + str(datasetidx) + '.log')
        print("filelist generated")
    return filelistdict
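# Hypothetical invocation (the directory paths mirror the commented example
# inside the function and are placeholders):
# filelistdict = get_filelists_patientwise(
#     filelists=["/home/fyl/datasets/luna_64/train",
#                "/home/fyl/datasets/luna_64/test",
#                "/home/fyl/datasets/npy_non_set"],
#     fileext='npy', config=config)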