Example #1
#Segmentation, Feature, Data, Test and cleanFilesOfPrevSess come from this project's
#   own modules; their imports and the configuration variables (dbaName, dataFolder,
#   featureFolder, splitRatios, ...) are defined earlier in the original script.
#NOTE: the line defining async2secSegments is cut off in the original snippet; the
#   definition below is an assumed 2-second analogue of the 3-second one.
async2secSegments = Segmentation("None", periodSync=False, sizeType="fixed", frameSizeMs=2000.0, hopSizeMs=1000.0)
async3secSegments = Segmentation("None", periodSync=False, sizeType="fixed", frameSizeMs=3000.0, hopSizeMs=1000.0)
segStrategies = [async2secSegments, async3secSegments]

#Define features to be used
features = []
for featName in ['SubEnv']:  #other options: 'MFCC','MelSpec'
    for segType in segStrategies:
        for timeDim in [32, 64]:
            for freqDim in [16]:
                features.append(Feature(featName, [timeDim, freqDim], "frame", segType, involveDelta=False))


#Define data specifications for this database
data = Data(dbaName, dataFolder, featureFolder, features, useBalancedData, splitRatios, info)
#Define the NN models by name.
#   Implementations are in models.py. Feel free to add your own models and
#   test them by just changing the names here
modelNames = ['uocSeq1', 'uocSeq2']

#Run a random split and test several times (1/testSetPercentage)
#   ex: if the test set is 20%, the tests are repeated 5 times
numExperiments = int(1 / splitRatios[-1])
for i in range(numExperiments):
    for modelName in modelNames:
        #Define test specifications and run
        singleTest = Test(modelName, data, resultsFolder, batch_size=128, num_epochs=50)
        #Run the test: outputs will be put in the results folder
        singleTest.run()
    #Clean this test session's intermediate files
    cleanFilesOfPrevSess([dataFolder])
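A short worked illustration of the repetition count used in Example #1, assuming a value for splitRatios (it is defined earlier in the original script and not shown here):

# Illustration only: the splitRatios value below is an assumption.
splitRatios = [0.6, 0.2, 0.2]  # train / validation / test fractions (assumed)
numExperiments = int(1 / splitRatios[-1])
print(numExperiments)  # -> 5: a 20% test split is re-drawn and tested 5 times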
Example #2
def test_agent(args, shared_value, share_net):

    test = Test(args, shared_value, share_net)
    test.run()
Example #3
from torch.optim import lr_scheduler
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import copy
import torch.nn as nn
from utils import get_device
from dataset.DatasetHelper import DatasetHelper
from Test import Test
from Training import Training      # assumed import paths: these project-local modules
from ModelType import ModelType    #   are used below but not imported in the original snippet

if __name__ == '__main__':
    train = Training()

    train.train(n_epochs=5,
                batch_size=64,
                model_type=ModelType.cnn,
                criterion=nn.CrossEntropyLoss(),
                learning_rate=0.001)

    train.train(n_epochs=5,
                batch_size=64,
                model_type=ModelType.fine_tuned,
                criterion=nn.BCELoss(),
                learning_rate=0.001,
                enable_scheduler=True)

    test = Test()
    test.run()
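ModelType in Example #3 is project-local and never defined in the snippet; the members used are ModelType.cnn and ModelType.fine_tuned. A hypothetical sketch of such an enum, not the project's actual definition:

from enum import Enum

class ModelType(Enum):
    # Hypothetical stand-in; the real ModelType in the project is not shown.
    cnn = "cnn"
    fine_tuned = "fine_tuned"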
Example #4
File: Main.py Project: idthanm/apex_sac
def test_agent(args, shared_queue, shared_value):

    test = Test(args, shared_queue, shared_value)
    test.run()
Example #5
def takeTest():
    #student is assumed to be defined in the enclosing scope of the original script
    chapter = input("What chapter would you like to test on? ")
    test = Test(chapter)
    score = test.run()
    student.saveScore(chapter, score)
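Example #5 assumes a Test class whose run() returns a numeric score and a student object with a saveScore method, both defined elsewhere in the original script. A minimal self-contained sketch of such stand-ins, purely illustrative:

# Purely illustrative stand-ins; the original script's Test and student are not shown.
class Test:
    def __init__(self, chapter):
        self.chapter = chapter

    def run(self):
        # Quiz the user on self.chapter and return the score; a fixed value stands in here.
        return 0


class Student:
    def __init__(self):
        self.scores = {}

    def saveScore(self, chapter, score):
        self.scores[chapter] = score


student = Student()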
Example #6
#Imports are omitted in the original snippet: pandas and the scikit-learn classifiers
#   below, plus the project-local helpers Test, Search and impute_missing2 (whose
#   import paths are not shown).
import pandas as pd
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier as DTree  # assumed alias for the DTree name used below
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier


def main(fn):
    # data= pd.read_csv("../data/kddcup.data_10_percent_corrected", names=cols)
    data = pd.read_csv(fn, header=None)  # header=-1 in the original is rejected by current pandas; header=None reads a file with no header row

    # data= remove_missing(data)
    # data= impute_missing(data)
    data = impute_missing2(data)

    # Features to be used in classification
    features = [x for x in range(1, len(data.columns))]

    X = data[features]
    y = data[0]

    #h= TGaussianNB(X, y)
    #h.run()
    print("GaussianNB")
    h = Test(X, y, GaussianNB())
    h.run()
    h.report(fn="../Report/results/cancer.gnb.cm.tex")
    s = Search(X, y, GaussianNB(), [{}])
    s.search()
    s.report("../Report/results/cancer.gnb.tex")

    print("DTree neu")
    parameters = [{
        'criterion': ['gini', 'entropy'],
        'max_features': ['auto', 'sqrt', 'log2']
    }]
    s = Search(X, y, DTree(), parameters)
    s.search()
    s.report("../Report/results/cancer.dt.tex")
    h = Test(X, y,
             DTree(max_features='log2', criterion='gini', random_state=1234))
    h.run()
    h.report(fn="../Report/results/cancer.dt.cm.tex")
    print("RF")
    parameters = [{
        'n_estimators': range(1, 15),
        'criterion': ['gini', 'entropy'],
        'max_features': ['auto', 'sqrt', 'log2']
    }]
    s = Search(X, y, RandomForestClassifier(), parameters)
    s.search()
    s.report("../Report/results/cancer.rf.tex", )
    h = Test(
        X, y,
        RandomForestClassifier(n_estimators=6,
                               criterion='gini',
                               max_features='sqrt',
                               random_state=1234))
    h.run()
    h.report(fn="../Report/results/cancer.rf.cm.tex")

    parameters = [{
        'kernel': ['linear', 'sigmoid', 'rbf', 'poly'],
        'C': [0.1, 1, 10, 11, 20]
    }]
    print("SVM")
    from sklearn import preprocessing
    X_scaled = preprocessing.scale(X)
    s = Search(X_scaled, y, SVC(), parameters)
    s.search()
    s.report("../Report/results/cancer.svm.tex")
    h = Test(X, y, SVC(C=11, kernel='poly'))
    h.run()
    h.report(fn="../Report/results/cancer.svm.cm.tex")

    print("KNeighborsClassifier")
    parameters = [{
        'n_neighbors': range(4, 8),
        'weights': ['uniform', 'distance'],
        'p': [1, 2]
    }]
    s = Search(X, y, KNeighborsClassifier(), parameters)
    s.search()
    s.report("../Report/results/cancer.knn.tex")
    h = Test(X, y, KNeighborsClassifier(n_neighbors=5, weights='uniform', p=2))
    h.run()
    h.report(fn="../Report/results/cancer.knn.cm.tex")