# model_name = r'birdnet_dropout_conv'
if args.model_name is None:
    # model_name = "lenet"
    model_name = 'birdnet'
else:
    model_name = args.model_name
print('Model name: ' + model_name)

if args.dataset_name is None:
    dataset_name = "Data_2"
else:
    dataset_name = args.dataset_name
print('Dataset name: ' + dataset_name)

##################################
# DATASET & MODEL
config = Config(dataset=dataset_name, model=model_name)
# config = Config(dataset='Data_2', model='lenet')
# config = Config(dataset='Data_2', model='birdnet')  # failed a couple of times; probably needs augmentation
# config = Config(dataset='Data_2', model='lenet_dropout_conv')
# config = Config(dataset='Data_2', model='birdnet_dropout_conv')
##################################
if args.augment is True:
    config.DO_AUGMENT = True
    print('Data augmentation is used for conv-based nets.\n')
else:
    config.DO_AUGMENT = False
    print('No data augmentation is used for conv-based nets.\n')
##################################
if (model_name == 'recurr_lstm') or (model_name == r'conv1d_gru'):
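# --- Hedged sketch (not part of the original script) ---
# The block above reads args.model_name, args.dataset_name and args.augment,
# but the parser itself is defined elsewhere (near the top of this script).
# A minimal argparse setup that would produce a compatible `args` object
# might look like the following; the option names are assumptions inferred
# from the attribute accesses above.
# import argparse
# parser = argparse.ArgumentParser(description='Train an upcall classifier.')
# parser.add_argument('--model_name', default=None,
#                     help="e.g. 'lenet', 'birdnet', 'birdnet_dropout_conv'")
# parser.add_argument('--dataset_name', default=None,
#                     help="e.g. 'Data_1', 'Data_2'")
# parser.add_argument('--augment', action='store_true',
#                     help='enable data augmentation for conv-based nets')
# args = parser.parse_args()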
""" import os, sys ROOT_DIR = os.path.abspath("/home/kpalmer/AnacondaProjects/upcall-basic-net") # the path where upcall-basic-net is sys.path.append(ROOT_DIR) from upcall.accuracy_measure import map_build_day_file import glob, os from upcall.config import Config from upcall.full_process import full_process_interfaceDSP, full_process_interface from upcall.RunDetectorDSPdependency import MakeSoundStream #import matplotlib.pyplot as plt # parameter config = Config() DATA_DIR = r'/home/ys587/__Data' # truth data: TruthFolder = r'/home/ys587/__Data/__TruthLabelVAPlusStAndrew' TruthFolder = r'/cache/kpalmer/quick_ssd/data/dclmmpa2013/LogsWithNewSNR/RavenST/NOPPSet2' day_file_map = map_build_day_file(TruthFolder) # testing sound stream of St Andrew DCL SoundPath = os.path.join(DATA_DIR, r'DCL_St_Andrew/Sound_3_days') day_list = sorted(glob.glob(os.path.join(TruthFolder + '/', '*'))) SoundFileLoc = '/cache/kpalmer/quick_ssd/data/dclmmpa2013/Testing/Upcalls_NOPPset2' file_days = os.listdir(SoundFileLoc)
    # create a dataframe holding the precision-recall curves of all runs, for plotting with seaborn
    pd_prc_list = []
    for ii in range(len(pre_rec_list)):
        # pre_rec_array_list.append(np.array(pre_rec_list[ii]))
        pd_prc = pd.DataFrame(np.array(pre_rec_list[ii]).T,
                              columns=['mpre', 'mrec'])
        pd_prc['run'] = ii
        pd_prc_list.append(pd_prc)
    pd_prc_all_runs = pd.concat(pd_prc_list)
    pd_prc_all_runs.to_csv(os.path.join(seltab_detect_path, 'pre_rec.txt'),
                           sep='\t')


if __name__ == "__main__":
    config = Config()
    config.NUM_RUNS = 10

    # DCL13
    truth_folder = os.path.join(DATA_DIR, 'DCL_St_Andrew/Sound_3_days_seltab')  # DCL13 test dataset

    func_folder = '__full_data'  # could be __full_data_large as well
    # seltab_detect_path = r'/home/ys587/__ExptResult/__V4_Paper'
    seltab_detect_path = r'/home/ys587/__ExptResult/__V4_Paper/__result_only_20190114/__negtive_mining'

    model_path = glob.glob(seltab_detect_path + '/cv_*')
    model_path.sort()
    for mm in model_path:
        print(os.path.basename(mm))
        seltab_detect_path = os.path.join(mm, func_folder)
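        # --- Hedged sketch (not in the original script): plotting the PR curves ---
        # The per-run precision-recall table written above as 'pre_rec.txt' is in
        # long format ('mpre', 'mrec', 'run'), which seaborn can plot directly,
        # e.g. one curve per run:
        # import seaborn as sns
        # import matplotlib.pyplot as plt
        # pd_prc_all_runs = pd.read_csv(
        #     os.path.join(seltab_detect_path, 'pre_rec.txt'), sep='\t')
        # sns.lineplot(data=pd_prc_all_runs, x='mrec', y='mpre', hue='run')
        # plt.xlabel('Recall')
        # plt.ylabel('Precision')
        # plt.savefig(os.path.join(seltab_detect_path, 'pre_rec.png'))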
# the path where upcall-basic-net is
sys.path.append(ROOT_DIR)

from upcall.config import Config
from upcall.train_classifier import lenet, lenet_dropout_input, \
    lenet_dropout_conv, lenet_dropout_input_conv, birdnet, birdnet_dropout_input, \
    birdnet_dropout_conv, birdnet_dropout_input_conv
from upcall.train_classifier import net_train, model_preprocess
from script.DCLDE2018_train_data import prepare_truth_data

import time
start_time = time.time()

# config = Config(dataset='Data_4', model='birdnet_dropout_input_conv')
config = Config(dataset='Data_1', model='lenet')
# config = Config('Data_1', 'Lenet_dropout_conv')  # model of the test case 38-0.8361.hdf5

##################################
# Model of DCLDE 2018
# Build models here
##################################
try:
    # use the model name string as a function name to generate the model
    func_model_generate = globals()[config.MODEL]
    model = func_model_generate(config)
except Exception:
    print("ModelError: the model name is incorrect or the model is not supported.")
    sys.exit()
# if config.MODEL == r'lenet':
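# --- Hedged sketch (not in the original script): explicit model dispatch ---
# The globals() lookup above works, but an explicit mapping from config.MODEL
# to its constructor is easier to validate and cannot accidentally call an
# unrelated global. The constructor names below come from the imports in this
# script; the dict itself is an assumption, not the repo's actual mechanism.
# MODEL_BUILDERS = {
#     'lenet': lenet,
#     'lenet_dropout_input': lenet_dropout_input,
#     'lenet_dropout_conv': lenet_dropout_conv,
#     'lenet_dropout_input_conv': lenet_dropout_input_conv,
#     'birdnet': birdnet,
#     'birdnet_dropout_input': birdnet_dropout_input,
#     'birdnet_dropout_conv': birdnet_dropout_conv,
#     'birdnet_dropout_input_conv': birdnet_dropout_input_conv,
# }
# if config.MODEL not in MODEL_BUILDERS:
#     sys.exit('ModelError: unknown model name: ' + str(config.MODEL))
# model = MODEL_BUILDERS[config.MODEL](config)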
from upcall.train_classifier import lenet, lenet_dropout_input, \
    lenet_dropout_conv, lenet_dropout_input_conv, birdnet, birdnet_dropout_input, \
    birdnet_dropout_conv, birdnet_dropout_input_conv
from upcall.train_classifier import net_train, model_preprocess, net_train_augment
from script.DCLDE2018_train_data import prepare_truth_data

import time
start_time = time.time()

##################################
# select a dataset to train the classifier and a model to run
##################################
# config = Config(dataset='Data_2', model='birdnet')
# config = Config(dataset='Data_4', model='birdnet_dropout_input_conv')
config = Config(dataset='Data_2', model='birdnet_dropout_conv')
# config = Config(dataset='Data_2', model='lenet')
# config = Config(dataset='Data_2', model='lenet_dropout_conv')
# config = Config('Data_1', 'Lenet_dropout_conv')  # model of the test case 38-0.8361.hdf5

##################################
# Classifier training
##################################
# Model of DCLDE 2018
# Build models here
try:
    # use the model name string as a function name to generate the model
    func_model_generate = globals()[config.MODEL]
    model = func_model_generate(config)
except Exception:
    print("ModelError: the model name is incorrect or the model is not supported.")
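# --- Hedged sketch (assumption, not in the original script) ---
# This script imports both net_train and net_train_augment; a plausible way to
# pick between them, following the config.DO_AUGMENT flag set in the
# command-line wrapper above, could be the following. The (model, config)
# argument list is an assumption; check the signatures in
# upcall/train_classifier.py before using it.
# if getattr(config, 'DO_AUGMENT', False):
#     net_train_augment(model, config)
# else:
#     net_train(model, config)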
Created on 7/11/19

@author: atoultaro
"""
import os
import glob

from upcall.config import Config
from upcall.accuracy_measure import calc_TP_FP_FN, map_build_day_file, \
    get_date_str

result_path = os.path.join(
    '/home/ys587/__ExptResult/cv_lenet_dropout_conv_train_Data_2_augment',
    '__full_data_large')
DATA_DIR = r'/home/ys587/__Data'

# Data
truth_folder = os.path.join(DATA_DIR, '__large_test_dataset/__truth_seltab')

config = Config(dataset='Data_2', model='lenet_dropout_conv')
config.NUM_RUNS = 1

for rr in range(config.NUM_RUNS):
    # find selection table for all days
    day_list = sorted(glob.glob(
        os.path.join(result_path, 'Run' + str(rr), '__TP_FN_FP', '*.txt')))
    print(rr)
    for dd in day_list:
        basename = os.path.basename(dd)
        map_day_file = map_build_day_file(truth_folder)
        truth_file = map_day_file[get_date_str(basename)]
        accu_result, _ = calc_TP_FP_FN(dd, truth_file, result_path, config)  # TP, FP, TP2, FN
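        # --- Hedged sketch (assumption, not in the original script) ---
        # The trailing comment suggests accu_result holds the day's counts as
        # (TP, FP, TP2, FN); the unpacking order is an assumption, so check
        # calc_TP_FP_FN in upcall/accuracy_measure.py before relying on it.
        # tp, fp, tp2, fn = accu_result
        # day_precision = tp / float(tp + fp) if (tp + fp) > 0 else 0.0
        # day_recall = tp / float(tp + fn) if (tp + fn) > 0 else 0.0
        # print('%s  P=%.3f  R=%.3f' % (basename, day_precision, day_recall))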
start_time = time.time()

proj_path = u'/mnt/drive_W/projects/2018_ORStateU_CCB_85941/Dep01'
sound_dir = os.path.join(proj_path, r'AIFFs_UTCz')
# sound_dir = r'/mnt/drive_W/projects/2018_ORStateU_CCB_85941/Dep01/detection_results/__sound_test'  # for test
model_path = os.path.join(
    proj_path,
    r'detection_results/__model/lenet_dropout_conv_train_Data_6_augment_Run0_epoch_100_F1_0.8733.hdf5')
# seltab_detect_output = os.path.join(proj_path, r'detection_results/__seltab')
seltab_detect_output = os.path.join(proj_path, r'detection_results/__seltab_test_drive')

best_model_path, best_accu = find_best_model(model_path)

config = Config(dataset=None, model=u"lenet_dropout_conv")
config.SCORE_THRE = 0.5

print('Detecting:')
# full paths of the day folders under sound_dir
day_list = sorted(os.path.join(sound_dir, dd) for dd in os.listdir(sound_dir))
# day_list = day_list[1:]  # avoid the first one; 20190216 has a problem
# day_list = day_list[0]
day_list = [day_list[ii] for ii in [0]]  # debugging 20190215 & 20190612
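# --- Hedged sketch (assumption, not in the original script) ---
# Before looping over day_list to run the detector, the output folder can be
# created and the chosen model reported; the per-day detection call itself
# depends on this repo's detector interface and is left out here.
# if not os.path.exists(seltab_detect_output):
#     os.makedirs(seltab_detect_output)
# print('Best model: %s (accuracy %s)' % (best_model_path, str(best_accu)))
# for the_day in day_list:
#     print('Processing ' + os.path.basename(the_day))
#     # ... run the detector on the sound files of `the_day` and write a
#     # selection table into seltab_detect_output ...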