import os

import SimpleITK as sitk

#######################

# Parse the command line

from neon.util.argparser import NeonArgparser

parser = NeonArgparser(__doc__)

# We can pass the input directory and output file name from the command line
parser.add_argument('-out',
                    '--outFilename',
                    default='dicom_out.h5',
                    help='Name of the output HDF5 file')
parser.set_defaults(data_dir='/Volumes/data/tonyr/dicom/Lung CT/stage1')
parser.set_defaults(save_path='.')
args = parser.parse_args()

data_dir = args.data_dir
outFilename = os.path.join(args.save_path, args.outFilename)

##############################


def verbosePrint(txt):
    print(txt)


verbosePrint('DICOM to HDF5 converter started ... ')
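
# The rest of this example is cut off in this excerpt. A minimal sketch of
# how the conversion might continue with SimpleITK and h5py (the series
# reading and the HDF5 dataset name below are assumptions, not the
# original code):
import h5py

reader = sitk.ImageSeriesReader()
dicom_names = reader.GetGDCMSeriesFileNames(data_dir)  # files of one DICOM series
reader.SetFileNames(dicom_names)
image = reader.Execute()                               # assemble the 3D volume
volume = sitk.GetArrayFromImage(image)                 # numpy array, (z, y, x) order

verbosePrint('Writing volume of shape {} to HDF5 ...'.format(volume.shape))
with h5py.File(outFilename, 'w') as df:
    df.create_dataset('input', data=volume)            # hypothetical dataset key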
Example #2

import csv

# The import lines of this example are cut off; these helpers are assumed to
# come from nlp_architect.utils.io, as in similar nlp-architect examples.
from nlp_architect.utils.io import (absolute_path, validate_existing_filepath,
                                    validate_parent_exists)
from neon.util.argparser import NeonArgparser


def save_results(output, predictions):
    """Write the model's predictions to a CSV file.

    NOTE: the original def line is truncated in this excerpt; the function
    name and signature here are reconstructed from the docstring.

    Args:
        output (str): output file path
        predictions:
            the model's predictions
    """
    results_list = predictions.tolist()
    with open(output, 'w', encoding='utf-8') as out_file:
        writer = csv.writer(out_file, delimiter=',', quotechar='"')
        for result in results_list:
            writer.writerow([result])
    print("Results of inference saved in {0}".format(output))


if __name__ == "__main__":
    # parse the command line arguments
    parser = NeonArgparser()
    parser.set_defaults(epochs=200)
    parser.add_argument('--data', help='prepared data CSV file path',
                        type=validate_existing_filepath)
    parser.add_argument('--model', help='path to the trained model file',
                        type=validate_existing_filepath)
    parser.add_argument('--print_stats', action='store_true', default=False,
                        help='print evaluation stats for the model predictions '
                        '(requires tagged data)')
    parser.add_argument('--output', help='path to location for inference output file',
                        type=validate_parent_exists)
    args = parser.parse_args()
    data_path = absolute_path(args.data)
    model_path = absolute_path(args.model)
    print_stats = args.print_stats
    output_path = absolute_path(args.output)
    # generate backend
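    # The original example is truncated here; a hedged sketch of the likely
    # continuation (the model loading and data iterator steps below are
    # assumptions, not the original code):
    from neon.backends import gen_backend
    from neon.models import Model

    be = gen_backend(backend=args.backend, batch_size=args.batch_size)
    model = Model(model_path)  # load the trained model from disk
    # `test_set` would be a neon data iterator (e.g. an ArrayIterator) built
    # from data_path; inference and saving then reduce to:
    #     predictions = model.get_outputs(test_set)
    #     save_results(output_path, predictions)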

Example #3

parser.set_defaults(
       # constant arguments
       rng_seed=2,
       backend="cpu",
       progress_bar=True,
       verbose=4,
       evaluation_freq=2,

       # data
       epochs=10,
       batch_size=128,
       data_dir="/global/project/projectdirs/nervana/yunjie/climate_neon1.0run/conv/DATA/",
       file_name="hurricanes.h5",
       nclass=2,
       data_dict=["1", "0"],
       norm_type=2,  # 1: global contrast norm, 2: standard norm, 3: l1/l2 norm (scikit-learn)

       train_num_p=8000,
       valid_num_p=1000,
       test_num_p=1000,
       train_num_n=8000,
       valid_num_n=1000,
       test_num_n=1000,

       # results (disabled here; c_time is a timestamp string defined
       # elsewhere in the original script)
       #out_dir="/global/project/projectdirs/nervana/yunjie/climate_neon1.0run/conv/RESULTS/hurricane/",
       #save_path="/global/project/projectdirs/nervana/yunjie/climate_neon1.0run/conv/RESULTS/hurricane/hurricane_classify_train.pkl",
       #serialize=2,
       #logfile="/global/project/projectdirs/nervana/yunjie/climate_neon1.0run/conv/RESULTS/hurricane/hurricane_classify_train."+c_time+".log",
       #output_file="/global/project/projectdirs/nervana/yunjie/climate_neon1.0run/conv/RESULTS/hurricane/hurricane_classify_train."+c_time+".h5",
)

Example #4

parser.set_defaults(
       # constant arguments
       rng_seed=2,
       backend="cpu",
       progress_bar=True,
       verbose=4,
       evaluation_freq=2,

       # data
       epochs=2,
       batch_size=100,
       data_dir="./TEST_DATA",
       file_name="hurricanes.h5",
       nclass=2,
       data_dict=["1", "0"],
       norm_type=2,  # 1: global contrast norm, 2: standard norm, 3: l1/l2 norm (scikit-learn)

       train_num_p=1000,
       valid_num_p=1000,
       test_num_p=1000,
       train_num_n=1000,
       valid_num_n=1000,
       test_num_n=1000,

       # results (c_time is assumed to be a timestamp string defined earlier
       # in the original script)
       out_dir="./TEST_RESULTS",
       save_path="./TEST_RESULTS/hurricane_classify_train.pkl",
       serialize=2,
       logfile="./TEST_RESULTS/hurricane_classify_train."+c_time+".log",
       output_file="./TEST_RESULTS/hurricane_classify_train."+c_time+".h5",
)

Example #5

parser.set_defaults(
       # constant arguments
       rng_seed=2,
       backend="cpu",
       datatype="f32",
       progress_bar=True,
       verbose=4,
       #evaluation_freq=3,

       # variable arguments
       epochs=10,
       batch_size=100,
       #data_dir="/global/project/projectdirs/nervana/yunjie/climatedata/new_landsea/",
       #file_name="atmosphericriver_us+TMQ+land_Sep4.h5",
       data_dir="/global/project/projectdirs/nervana/yunjie/climate_neon1.0run/conv/DATA/",
       file_name="atmospheric_river_us+eu+landsea_sep10.h5",
       nclass=2,  # number of event categories to classify
       data_dict=["AR", "Non_AR"],
       norm_type=3,  # 1: global contrast norm, 2: standard norm, 3: l1/l2 norm (scikit-learn)

       # TODO: read "nclass" from the input files to make this more general

       train_num_p=5000,  # positive training examples
       valid_num_p=800,
       test_num_p=1000,
       train_num_n=5000,  # negative training examples
       valid_num_n=500,
       test_num_n=1000,

       # output files (disabled here; c_time is a timestamp string defined
       # elsewhere in the original script)
       #out_dir="/global/project/projectdirs/nervana/yunjie/climate_neon1.0run/conv/RESULTS/atmosphericriver/",
       #save_path="/global/project/projectdirs/nervana/yunjie/climate_neon1.0run/conv/RESULTS/atmosphericriver/ar_classify_train.pkl",
       #serialize=2,
       #logfile="/global/project/projectdirs/nervana/yunjie/climate_neon1.0run/conv/RESULTS/atmosphericriver/ar_classify_train."+c_time+".log",
       #output_file="/global/project/projectdirs/nervana/yunjie/climate_neon1.0run/conv/RESULTS/atmosphericriver/ar_classify_train."+c_time+".h5",
)

Example #6

# `argss` is assumed to be a list of extra command-line argument names
# defined earlier in the original script.
for a in argss:
    parser.add_argument(a)

parser.set_defaults(
       # constant arguments
       #rng_seed=2,
       backend="cpu",
       datatype="f32",
       progress_bar=True,
       log_thresh=10,

       # variable arguments
       epochs=15,
       batch_size=100,
       data_dir="/global/project/projectdirs/nervana/yunjie/climate_neon1.0run/conv/DATA/",
       file_name="fronts_all.h5",
       nclass=2,
       data_dict=["Front", "NonFront"],
       norm_type=2,  # 1: global contrast norm, 2: standard norm, 3: l1/l2 norm (scikit-learn)

       # TODO: read "nclass" from the input data to make this more general

       # follow the 80% / 20% split rule below
       train_num_p=4000,
       valid_num_p=600,
       test_num_p=1000,
       train_num_n=4000,
       valid_num_n=600,
       test_num_n=1000
)

args = parser.parse_args()

Example #7

parser.set_defaults(
       # constant arguments
       rng_seed=2,
       backend="cpu",
       progress_bar=True,
       verbose=4,
       evaluation_freq=2,

       # data
       epochs=1,
       # when testing, make the batch size equal to the test data length
       # (easier for the confusion matrix and feature sampling later)
       batch_size=100,

       data_dir="/global/project/projectdirs/nervana/yunjie/climate_neon1.0run/conv/DATA/",
       file_name="hurricanes.h5",
       nclass=2,
       data_dict=["1", "0"],
       norm_type=2,  # 1: global contrast norm, 2: standard norm, 3: l1/l2 norm (scikit-learn)

       train_num_p=8000,
       valid_num_p=1000,
       test_num_p=1000,
       train_num_n=8000,
       valid_num_n=1000,
       test_num_n=1000,

       # results (c_time is assumed to be a timestamp string defined earlier
       # in the original script)
       #out_dir="/global/project/projectdirs/nervana/yunjie/climate_neon1.0run/conv/RESULTS/",
       #save_path="/global/project/projectdirs/nervana/yunjie/climate_neon1.0run/conv/RESULTS/hurricane_classify_train_S.pkl",
       #serialize=2,
       logfile="/global/project/projectdirs/nervana/yunjie/climate_neon1.0run/conv/RESULTS/hurricane/hurricane_classify_test."+c_time+".log",
       #output_file="/global/project/projectdirs/nervana/yunjie/climate_neon1.0run/conv/RESULTS/hurricane_classify_S."+c_time+".h5",
       model_file="/global/project/projectdirs/nervana/yunjie/climate_neon1.0run/conv/RESULTS/hurricane/hurricane_classify_train.pkl",
)

Example #8

parser.set_defaults(
       # constant arguments
       rng_seed=3,
       backend="cpu",
       datatype="f32",
       progress_bar=True,
       verbose=4,
       #evaluation_freq=1,

       # variable arguments
       epochs=15,
       batch_size=100,
       data_dir="/global/project/projectdirs/nervana/yunjie/climatedata/old/",
       file_name="fronts_all.h5",
       nclass=2,
       data_dict=["Front", "NonFront"],
       norm_type=2,  # 1: global contrast norm, 2: standard norm, 3: l1/l2 norm (scikit-learn)

       # TODO: read "nclass" from the input data to make this more general

       # follow the 80% / 20% split rule below
       train_num_p=4000,
       valid_num_p=600,
       test_num_p=1000,
       train_num_n=4000,
       valid_num_n=600,
       test_num_n=1000,

       # results (disabled here; c_time is a timestamp string defined
       # elsewhere in the original script)
       #out_dir="/global/project/projectdirs/nervana/yunjie/climate_neon1.0run/conv/RESULTS/fronts/",
       #save_path="/global/project/projectdirs/nervana/yunjie/climate_neon1.0run/conv/RESULTS/fronts/fronts_classify_train.pkl",
       #serialize=2,
       #logfile="/global/project/projectdirs/nervana/yunjie/climate_neon1.0run/conv/RESULTS/fronts/fronts_classify_train."+c_time+".log",
       #output_file="/global/project/projectdirs/nervana/yunjie/climate_neon1.0run/conv/RESULTS/fronts/fronts_classify_train."+c_time+".h5",
)
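
A note on the pattern used throughout these examples: argparse's set_defaults()
attaches every keyword to the parsed namespace even when no matching
add_argument() call exists, which is how custom keys such as nclass and
data_dict reach the scripts. A minimal self-contained sketch (plain argparse
here; the key names are illustrative):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--epochs', type=int, default=1)
# Keys set via set_defaults() appear on the namespace even without a
# corresponding add_argument() call:
parser.set_defaults(epochs=15, nclass=2, data_dict=["Front", "NonFront"])

args = parser.parse_args([])        # parse an empty argv for the demo
print(args.epochs)                  # -> 15 (set_defaults overrides default=1)
print(args.nclass, args.data_dict)  # -> 2 ['Front', 'NonFront']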