Example #1
0
import time
import os
import sys

sys.path.append(r"U:\Software\biotas")
import biotas
import warnings

warnings.filterwarnings('ignore')

t0 = time.time()

# --- project configuration --------------------------------------------------
recType = 'orion'  # receiver type whose accuracy is being assessed
proj_dir = r'E:\Manuscript\CT_River_2015'  # root project directory
dbName = 'ctr_2015_v2.db'  # project database file name
projectDB = os.path.join(proj_dir, 'Data', dbName)
scratch_dir = os.path.join(proj_dir, 'Output', 'Scratch')
figure_ws = os.path.join(proj_dir, 'Output', 'Figures')
rec_list = ['T13', 'T18', 'T21', 'T22']  # receivers to summarize

# --- summarize training data for this receiver type -------------------------
train_stats = biotas.training_results(recType, projectDB, figure_ws)
train_stats.train_stats()

# --- summarize classification results for the listed receivers --------------
class_stats = biotas.classification_results(recType,
                                            projectDB,
                                            figure_ws,
                                            rec_list=rec_list)
class_stats.classify_stats()
Example #2
0
# Feature fields the classifier scores detections on.
fields = ['conRecLength', 'hitRatio', 'noiseRatio', 'power', 'lagDiff']

# Use an informed prior?  If False, a 50/50 split is used and the
# classifier behaves like a Maximum Likelihood classifier.
prior = True
print("Set Up Complete, Creating Histories")

# Start the elapsed-time clock.
# NOTE(review): `tS` was referenced at the final print but never defined in
# the original snippet, which raised a NameError; it is defined here.
tS = time.time()

# Get the fish (FreqCodes) detected at this site.
# The query is parameterized rather than %-interpolated so the site
# identifier is quoted safely (no SQL-injection / quoting problems).
conn = sqlite3.connect(projectDB)
sql = "SELECT FreqCode FROM tblRaw WHERE recID = ?;"
histories = pd.read_sql(sql, con=conn, params=(site,))
tags = pd.read_sql("SELECT FreqCode, TagType FROM tblMasterTag WHERE TagType == 'Study'", con=conn)
# The original created an sqlite3 cursor it never used and left the
# connection open; the cursor is removed and the connection closed instead.
conn.close()

# Keep only study tags and reduce to the unique FreqCodes to iterate over.
histories = histories.merge(right=tags, left_on='FreqCode', right_on='FreqCode')
histories = histories[histories.TagType == 'Study'].FreqCode.unique()
print("There are %s fish to iterate through at site %s" % (len(histories), site))

# Create training data for this round of classification.
train = biotas.create_training_data(site, projectDB)

# Create a classification object for each specimen and classify it.
for i in histories:
    class_dat = biotas.classify_data(i, site, fields, projectDB, scratch_ws,
                                     train, informed_prior=prior)
    biotas.calc_class_params_map(class_dat)
    print("Fish %s classified" % (i))
print("Detections classified!")
biotas.classDatAppend(site, scratch_ws, projectDB)

print("process took %s to compile" % (round(time.time() - tS, 3)))

# Summarize classification performance for this site.
class_stats = biotas.classification_results(recType, projectDB, figure_ws, site)
class_stats.classify_stats()
Example #3
0
    # Load the training observations for this receiver from the training DB.
    # The query is parameterized rather than %-interpolated so the site
    # identifier is quoted safely; the original also created an sqlite3
    # cursor it never used, which has been removed (pd.read_sql takes the
    # connection directly) and the connection is now closed after the read.
    conn = sqlite3.connect(trainingDB)
    sql = "SELECT * FROM tblTrain WHERE recID = ?;"
    tblTrainDF = pd.read_sql(sql, con=conn, params=(site,))
    conn.close()

    # Build one classify_data object per fish history.  These are suitable
    # for a Pool multiprocess map, though they are mapped serially below.
    iters = []
    for j in histories:
        iters.append(
            biotas.classify_data(j,
                                 ant_to_rec_dict[i],  # receiver for antenna i (enclosing scope)
                                 fields,
                                 projectDB,
                                 outputScratch,
                                 training_data=tblTrainDF,
                                 training=trainingDB))
    print("Finished creating history objects")

    # Classify every fish, then append the scratch output to the project DB.
    for k in iters:
        biotas.calc_class_params_map(k)
    print("Detections classified!")
    biotas.classDatAppend(site, outputScratch, projectDB)
    print("process took %s to compile" % (round(time.time() - tS, 3)))

    # Generate summary statistics for classification by receiver type.
    class_stats = biotas.classification_results(recType,
                                                projectDB,
                                                figure_ws,
                                                rec_list=[ant_to_rec_dict[i]])
    class_stats.classify_stats()