def gen_gold_feature_csv(gold_dir, output_gold_csv_file, output_gold_feature_csv):
    """Build a CSV mapping gold-standard SWC files to their source image names,
    then compute morphology features for the whole gold set.

    Parameters:
        gold_dir: directory containing the sorted gold-standard *.swc files.
        output_gold_csv_file: output path for the image-name/swc-file mapping CSV.
        output_gold_feature_csv: output path for the per-neuron feature CSV.
    """
    # Example gold file name:
    # sorted_GMR_..._left_optic_lobe.v3draw.extract_6.v3dpbd.ano_stamp_2015_06_17_12_23.swc
    gold_files = glob.glob(os.path.join(gold_dir, '*.swc'))

    images = []
    gold_swc_files = []
    for swc_path in gold_files:  # renamed from `file` to avoid shadowing the builtin
        # Recover the embedded image name: keep everything up to and including
        # the image extension, preferring .v3dpbd when both markers appear.
        if ".v3dpbd" in swc_path:
            image = swc_path.split('.v3dpbd')[0] + ".v3dpbd"
        else:
            image = swc_path.split('.v3draw')[0] + ".v3draw"
        # Drop the leading path and "sorted_" prefix.
        image = image.split('sorted_')[-1]
        images.append(image)
        gold_swc_files.append(swc_path)

    df_gold = pd.DataFrame()
    df_gold['image_file_name'] = pd.Series(images)
    df_gold['gold_swc_file'] = pd.Series(gold_swc_files)
    df_gold.to_csv(output_gold_csv_file, index=False)

    # Generate the .ano linker file required for batch feature calculation.
    out_sorted_ANO = os.path.join(gold_dir, "sorted.ano")
    bn.genLinkerFile(gold_dir, out_sorted_ANO)

    out_feature_file = os.path.join(gold_dir, "features.nfb")
    bn.batch_compute(out_sorted_ANO, out_feature_file)
    generateALLFeatureCSV_gold166(out_feature_file, output_gold_feature_csv)
def gen_gold_feature_csv(gold_dir, output_gold_csv_file, output_gold_feature_csv):
    """Create the gold-set image/SWC mapping CSV and the gold feature CSV."""
    # Gold file names look like:
    # sorted_<image>.v3dpbd.ano_stamp_<timestamp>.swc

    def _image_name(swc_path):
        # Strip everything after the image extension, then the "sorted_" prefix.
        marker = ".v3dpbd" if swc_path.find(".v3dpbd") > -1 else ".v3draw"
        return (swc_path.split(marker)[0] + marker).split('sorted_')[-1]

    gold_swc_files = glob.glob(os.path.join(gold_dir, '*.swc'))
    images = [_image_name(p) for p in gold_swc_files]

    df_gold = pd.DataFrame()
    df_gold['image_file_name'] = pd.Series(images)
    df_gold['gold_swc_file'] = pd.Series(gold_swc_files)
    df_gold.to_csv(output_gold_csv_file, index=False)

    # generate ano file for feature calculation
    out_sorted_ANO = gold_dir + "/sorted.ano"
    bn.genLinkerFile(gold_dir, out_sorted_ANO)
    out_feature_file = gold_dir + "/features.nfb"
    bn.batch_compute(out_sorted_ANO, out_feature_file)
    generateALLFeatureCSV_gold166(out_feature_file, output_gold_feature_csv)
    return
def cal_bn_features(preprocessed_dir):
    """Compute BlastNeuron morphology features for every reconstruction in
    *preprocessed_dir* and export them to features_with_tags.csv.

    Parameters:
        preprocessed_dir: directory of preprocessed reconstructions.
    """
    # Linker (.ano) file enumerating all reconstructions for the batch run.
    preprocessed_ANO = os.path.join(preprocessed_dir, "preprocessed.ano")
    bn.genLinkerFile(preprocessed_dir, preprocessed_ANO)

    # Batch-compute features into the binary .nfb feature file.
    feature_file = os.path.join(preprocessed_dir, 'features.nfb')
    bn.batch_compute(preprocessed_ANO, feature_file)

    # Convert the feature file into a CSV with tags.
    nfb.generateALLFeatureCSV(feature_file,
                              os.path.join(preprocessed_dir, 'features_with_tags.csv'))
def cal_bn_features(preprocessed_dir):
    """Generate a linker file, batch-compute features, and dump them to CSV."""
    ano_path = preprocessed_dir + "/preprocessed.ano"
    nfb_path = preprocessed_dir + '/features.nfb'
    csv_path = preprocessed_dir + '/features_with_tags.csv'

    bn.genLinkerFile(preprocessed_dir, ano_path)   # linker for the batch run
    bn.batch_compute(ano_path, nfb_path)           # feature computation
    nfb.generateALLFeatureCSV(nfb_path, csv_path)  # .nfb -> tagged CSV
    return
def main():
    """Driver: optionally submit preprocessing jobs for the selected consensus
    dataset, otherwise compute BlastNeuron features and export a tagged CSV.

    Dataset and mode are selected by editing the flag constants below.
    """
    ###############################################################################
    preprocessing = 0  # 1 = generate qsub preprocessing jobs and stop; 0 = feature pass
    janelia = 0
    taiwan = 1

    data_DIR = None
    if taiwan:
        data_DIR = "/data/mat/xiaoxiaol/data/big_neuron/consensus_all/taiwan"
        original_dir = data_DIR + "/consensus_0330_anisosmooth"
        db_tags_csv_file = data_DIR + '/taiwan_smooth_features_with_tags.csv'
    if janelia:
        data_DIR = "/data/mat/xiaoxiaol/data/big_neuron/consensus_all/janelia_set1"
        original_dir = data_DIR + "/consensus_0330_anisosmooth"
        db_tags_csv_file = data_DIR + '/j1_smooth_features_with_tags.csv'
    if data_DIR is None:
        # Fail loudly instead of hitting a NameError on original_dir below.
        raise SystemExit("No dataset selected: set taiwan=1 or janelia=1")
    ###############################################################################
    print(original_dir)

    preprocessed_dir = data_DIR + "/preprocessed_consensus_smooth"
    if not os.path.exists(preprocessed_dir):
        os.makedirs(preprocessed_dir)  # replaces shelling out to `mkdir -p`

    if preprocessing == 1:
        # Preprocessing / alignment: emit one qsub job per input reconstruction.
        count = 0
        qsub_folder = "/data/mat/xiaoxiaol/work/qsub"
        # Best-effort cleanup of stale job scripts and logs.
        os.system("rm " + qsub_folder + "/*.qsub")
        os.system("rm " + qsub_folder + "/*.o*")
        os.system("rm " + qsub_folder + "/jobs.txt")
        for input_swc_path in glob.glob(original_dir + "/*.eswc"):
            swc_fn = input_swc_path.split('/')[-1]
            preprocessed_swc_fn = preprocessed_dir + '/' + swc_fn
            if not os.path.exists(preprocessed_swc_fn):
                bn.pre_processing(input_swc_path, preprocessed_swc_fn, 1,
                                  qsub_folder, count)
                count = count + 1
        # Stop here: the generated jobs must be run on the cluster (pstar)
        # before the feature pass below can proceed.
        raise SystemExit
    print("done")

    preprocessed_ANO = preprocessed_dir + "/preprocessed.ano"
    bn.genLinkerFile(preprocessed_dir, preprocessed_ANO)

    # Batch-compute features, then convert the .nfb file into a tagged CSV.
    feature_file = preprocessed_dir + '/features.nfb'
    bn.batch_compute(preprocessed_ANO, feature_file)
    nfb.generateALLFeatureCSV(feature_file, db_tags_csv_file)
def main():
    """Compute BlastNeuron features for the selected consensus dataset."""
    ###############################################################################
    # Dataset / mode switches (edit in place).
    preprocessing = 0
    janelia = 0
    taiwan = 1

    if taiwan:
        data_DIR = "/data/mat/xiaoxiaol/data/big_neuron/consensus_all/taiwan"
        original_dir = data_DIR + "/consensus_0330_anisosmooth"
        db_tags_csv_file = data_DIR + '/taiwan_smooth_features_with_tags.csv'
    if janelia:
        data_DIR = "/data/mat/xiaoxiaol/data/big_neuron/consensus_all/janelia_set1"
        original_dir = data_DIR + "/consensus_0330_anisosmooth"
        db_tags_csv_file = data_DIR + '/j1_smooth_features_with_tags.csv'
    ###############################################################################
    print(original_dir)

    preprocessed_dir = data_DIR + "/preprocessed_consensus_smooth"
    if not os.path.exists(preprocessed_dir):
        os.system("mkdir -p " + preprocessed_dir)

    if preprocessing == 1:
        # Preprocessing / alignment: one qsub job per input file.
        job_index = 0
        qsub_folder = "/data/mat/xiaoxiaol/work/qsub"
        # Clear stale job scripts, logs, and the job list.
        os.system("rm " + qsub_folder + "/*.qsub")
        os.system("rm " + qsub_folder + "/*.o*")
        os.system("rm " + qsub_folder + "/jobs.txt")
        for input_swc_path in glob.glob(original_dir + "/*.eswc"):
            swc_fn = input_swc_path.split('/')[-1]
            out_swc = preprocessed_dir + '/' + swc_fn
            if not os.path.exists(out_swc):
                bn.pre_processing(input_swc_path, out_swc, 1, qsub_folder, job_index)
                job_index = job_index + 1
        exit()  # run jobs on pstar
    print("done")

    preprocessed_ANO = preprocessed_dir + "/preprocessed.ano"
    bn.genLinkerFile(preprocessed_dir, preprocessed_ANO)
    # Batch-compute features, then convert the .nfb file into a tagged CSV.
    feature_file = preprocessed_dir + '/features.nfb'
    bn.batch_compute(preprocessed_ANO, feature_file)
    nfb.generateALLFeatureCSV(feature_file, db_tags_csv_file)
    return
def cal_bn_features(input_dir, results_feature_csv):
    """Compute BlastNeuron features for all reconstructions in *input_dir*
    and write them, with gold-166 tags, to *results_feature_csv*.

    Parameters:
        input_dir: directory containing the reconstructions to process.
        results_feature_csv: output CSV path for the tagged feature table.
    """
    # Linker (.ano) file enumerating the inputs for batch computation.
    input_ANO = os.path.join(input_dir, "input.ano")
    bn.genLinkerFile(input_dir, input_ANO)

    # Batch-compute features into the binary .nfb file.
    feature_file = os.path.join(input_dir, "features.nfb")
    bn.batch_compute(input_ANO, feature_file)
    print("output feature file:" + feature_file)
    print("output ano file:" + input_ANO)

    generateALLFeatureCSV_gold166(feature_file, results_feature_csv)
    print("output features with tag:" + results_feature_csv)
def cal_bn_features(input_dir, results_feature_csv):
    """Generate linker + feature files in *input_dir*, then export a tagged CSV."""
    ano_file = input_dir + "/input.ano"
    nfb_file = input_dir + "/features.nfb"

    bn.genLinkerFile(input_dir, ano_file)  # linker for batch computing
    bn.batch_compute(ano_file, nfb_file)   # feature extraction
    print("output feature file:" + nfb_file)
    print("output ano file:" + ano_file)

    generateALLFeatureCSV_gold166(nfb_file, results_feature_csv)
    print("output features with tag:" + results_feature_csv)
    return