Exemple #1
0
def do_gradient_boost():
    """Train a gradient-boosting classifier, print its test accuracy and
    persist the fitted model under the name "Gradient2".

    Fix: the local variable was named ``ada_boost`` although it holds a
    ``GradientBoostingClassifier``; renamed to avoid confusion with the
    AdaBoost example elsewhere in the file.
    """
    x_train, _, y_train, _, x_test, y_test = Rf.read_data()
    gradient_boost = GradientBoostingClassifier()
    gradient_boost.fit(x_train, y_train)
    score = gradient_boost.score(x_test, y_test)
    print(score)
    Rf.save_model("Gradient2", gradient_boost)
Exemple #2
0
def do_Knn():
    """Fit a 5-nearest-neighbours classifier, print its accuracy on the
    test split and save the fitted model as "Knn2"."""
    x_train, _, y_train, _, x_test, y_test = Rf.read_data()
    model = KNeighborsClassifier(n_neighbors=5)
    model.fit(x_train, y_train)
    print(model.score(x_test, y_test))
    Rf.save_model("Knn2", model)
Exemple #3
0
def do_random_forest():
    """Train a random forest, print its test accuracy and save the fitted
    model as "Random_Forest2"."""
    x_train, _, y_train, _, x_test, y_test = Rf.read_data()
    forest = RandomForestClassifier()
    forest.fit(x_train, y_train)
    accuracy = forest.score(x_test, y_test)
    print(accuracy)
    Rf.save_model("Random_Forest2", forest)
Exemple #4
0
def do_Bagging():
    """Train a bagging classifier, print its test accuracy and save the
    fitted model as "Bagging2"."""
    x_train, _, y_train, _, x_test, y_test = Rf.read_data()
    model = BaggingClassifier()
    model.fit(x_train, y_train)
    print(model.score(x_test, y_test))
    Rf.save_model("Bagging2", model)
Exemple #5
0
def do_ada_boost():
    """Train an AdaBoost classifier, print its test accuracy and save the
    fitted model as "Ada2"."""
    x_train, _, y_train, _, x_test, y_test = Rf.read_data()
    model = AdaBoostClassifier()
    model.fit(x_train, y_train)
    print(model.score(x_test, y_test))
    Rf.save_model("Ada2", model)
Exemple #6
0
def do_svm():
    """Train a linear SVM on standardized features, print its test
    accuracy and save the fitted model as "SVM2".

    Bug fix: the original code scaled only ``x_train`` and then scored the
    model on the *unscaled* ``x_test``, so the reported accuracy compared
    incompatible feature spaces. The scaler fitted on the training data is
    now applied to the test data as well.
    """
    x_train, _, y_train, _, x_test, y_test = Rf.read_data()

    scaler = StandardScaler()
    x_train = scaler.fit_transform(x_train)
    # Apply the SAME training-set scaling to the test features.
    x_test = scaler.transform(x_test)

    svc = SVC(kernel='linear', max_iter=1000)
    svc.fit(x_train, y_train)
    score = svc.score(x_test, y_test)
    print(score)
    Rf.save_model("SVM2", svc)
Exemple #7
0
def SendConfigToAPIC(select_method, apic, configport):
    """Push EPG static-path bindings to a Cisco ACI APIC.

    For every port entry in ``configport``, each VLAN listed in the
    entry's "VLAN" field is matched against the EPG records returned by
    ``Readfile.ReadEPG()``; every match triggers a POST of an
    ``fvRsPathAtt`` object under the matching Tenant/AppProfile/EPG.

    Args:
        select_method: value sent as the object's ``status`` attribute and
            echoed upper-cased in console output -- presumably "created" or
            "deleted"; verify against the caller.
        apic: pyaci session object exposing the ``.mit`` managed-object tree.
        configport: iterable of dicts with keys "VLAN" (comma-separated
            string), "POD", "nodeID_A", "nodeID_B", "Interface_ETH" and
            "Interface_VPC".
    """
    for value in configport:
        # NOTE(review): the EPG list is re-read for every port entry; it
        # could be hoisted above the loop if the file never changes mid-run.
        list_epg = Readfile.ReadEPG()
        vlan_list = value["VLAN"].split(",")

        for i in vlan_list:
            for j in list_epg:
                if j["Vlan"] == i:
                    tenant = j["Tenant"]
                    appprofile = j["AppProfile"]
                    epg = j["EPG Name"]
                    vlan = "vlan-" + str(j["Vlan"])
                    # Build the target interface path (tDn) from the pod /
                    # node / interface fields of the current port entry.
                    tDn = CreatePathConfig(value["POD"], value["nodeID_A"], value["nodeID_B"], value["Interface_ETH"], value["Interface_VPC"])
                    commit_config = apic.mit.polUni().fvTenant(tenant).fvAp(appprofile).fvAEPg(epg).fvRsPathAtt(encap=vlan, instrImedcy="immediate", tDn=tDn, status=select_method)

                    try:
                        result = commit_config.POST(format='xml')
                        print("Status Code: " + colored(str(result.status_code),
                                                        "green") + "\tMethod: " + select_method.upper())
                        print("Detail: " + "(Tenant) ===> " + colored(tenant, "yellow") + "\t(EPG) ===> " + colored(epg,"blue") + "\t(Path) ===> " + tDn)
                        print("---------------------------------")
                    except pyaci.errors.RestError as e:
                        # The APIC error payload is XML: extract the numeric
                        # code and the second "; "-separated text fragment.
                        parse = (ET.fromstring(str(e))).find('./error')
                        status = parse.attrib['code']
                        error = ((parse.attrib['text']).split("; "))[1]
                        print("Status code: " + colored(status,"red") + "\tMethod: " + select_method.upper() + "\nDetail: " + error)
                        print("---------------------------------")
                        continue
Exemple #8
0
def do_Knn_with_k():
    """Run k-fold validation for KNN with 4, 5 and 10 neighbours."""
    for neighbours in (4, 5, 10):
        print("K=%d" % neighbours)
        Rf.k_validation(KNeighborsClassifier(n_neighbors=neighbours))
Exemple #9
0
def do_svm_with_k():
    """Run k-fold validation for SVC with linear, polynomial and RBF
    kernels."""
    for label, kernel in (("linear SVC", 'linear'),
                          ("polynomial SVC", 'poly'),
                          ("rbf SVC", 'rbf')):
        print(label)
        Rf.k_validation(SVC(kernel=kernel, max_iter=1000))
Exemple #10
0
def do_Knn_with_f1():
    """F1 validation (micro / macro / weighted averaging) for KNN with 4,
    5 and 10 neighbours."""
    for neighbours in (4, 5, 10):
        print("K=%d" % neighbours)
        knn = KNeighborsClassifier(n_neighbors=neighbours)
        for average in ('micro', 'macro', 'weighted'):
            Rf.f1_validation(knn, average)
Exemple #11
0
def do_bagging_with_f1():
    """F1 validation of a bagging classifier under every averaging mode."""
    model = BaggingClassifier()
    for average in ('micro', 'macro', 'weighted', None):
        Rf.f1_validation(model, average)
Exemple #12
0
def do_gradient_boost_with_k():
    """Run k-fold validation for a gradient-boosting classifier."""
    Rf.k_validation(GradientBoostingClassifier())
Exemple #13
0
def do_final_bagging():
    """Fit the final bagging model on the full training split and save it
    as "Bagging" (no held-out evaluation here)."""
    x_train, _, y_train, _, _ = Rf.read_data_final()
    model = BaggingClassifier()
    model.fit(x_train, y_train)
    Rf.save_model("Bagging", model)
Exemple #14
0
def do_Bagging_with_k():
    """Run k-fold validation for a bagging classifier."""
    Rf.k_validation(BaggingClassifier())
Exemple #15
0
def do_gradient_boost_with_f1():
    """F1 validation of a gradient-boosting classifier under every
    averaging mode."""
    model = GradientBoostingClassifier()
    for average in ('micro', 'macro', 'weighted', None):
        Rf.f1_validation(model, average)
Exemple #16
0
    Visualization.AccuracyInEpochs(ys, 20)


def question3():
    """Run two fixed-weight perceptrons over the module-level ``data``
    samples, plot their per-sample confidence values, and print the first
    perceptron's accuracy.

    Relies on ``Weights.activation`` storing its raw output in ``.result``
    as a side effect.
    """
    learning_rate = 0.00005
    correct = 0
    perceptron_a = Perceptrons.Weights([-1, 0, 0.5, -0.5, 0.5], learning_rate)
    perceptron_b = Perceptrons.Weights([-1.101, -0.008, 0.652, -0.372, 0.412], learning_rate)
    confidences_a = []
    confidences_b = []
    colors = []
    for sample in data:
        # Green marker when perceptron A matches the sample's label
        # (last element), red otherwise.
        if perceptron_a.activation(sample) == sample[-1]:
            correct += 1
            colors.append('g')
        else:
            colors.append('r')
        confidences_a.append(round(perceptron_a.result, 3))
        perceptron_b.activation(sample)
        confidences_b.append(round(perceptron_b.result, 3))
    Visualization.confidence(confidences_a, confidences_b, colors)
    print(correct / len(data))


# Script entry point: load the iris data set once, then run the selected
# exercise; the other questions stay commented out.
data = Readfile.readfile("iris.arff")
#question1()
question2()
#question3()
#question 4
#Visualization.scatterplot(data)
Exemple #17
0
def do_svm_with_f1():
    """F1 validation of SVC with linear, polynomial and RBF kernels, each
    evaluated under every averaging mode."""
    for label, kernel in (("linear SVC", 'linear'),
                          ("poly SVC", 'poly'),
                          ("rbf SVC", 'rbf')):
        print(label)
        svc = SVC(kernel=kernel, max_iter=1000)
        for average in ('micro', 'macro', 'weighted', None):
            Rf.f1_validation(svc, average)
Exemple #18
0
def do_random_forest_with_k():
    """Run k-fold validation for a random-forest classifier."""
    Rf.k_validation(RandomForestClassifier())
Exemple #19
0
def do_random_forest_with_f1():
    """F1 validation of a random forest under every averaging mode."""
    model = RandomForestClassifier()
    for average in ('micro', 'macro', 'weighted', None):
        Rf.f1_validation(model, average)
Exemple #20
0
import Readfile
import Models
import numpy as np
from sklearn.naive_bayes import MultinomialNB
from sklearn.model_selection import cross_validate
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.linear_model import LogisticRegression

# Tokenized input files -- presumably the negative / positive classes of a
# sentiment corpus (TODO confirm against Readfile).
path1 = "neg.tok"
path2 = "pos.tok"

# Two readers over the same files: index=1 builds unigram features,
# index=2 bigram features (per the variable names; verify in Readfile).
r_unigram = Readfile.Readfile(path1, path2, index=1)
r_bigram = Readfile.Readfile(path1, path2, index=2)
data_unigram = r_unigram.data_set
data_bigram = r_bigram.data_set
model = Models.Models()


def test_model(data, classifier, message='-'):
    print('-------------------%s below--------------------' % message)
    data_set = model.binary_feature(data)
    np.random.shuffle(data_set)
    row, col = data_set.shape
    X = data_set[:, :col - 1]
    y = data_set[:, col - 1]
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1)
    cv_results = cross_validate(classifier,
                                X_train,
                                y_train.ravel(),
Exemple #21
0
def do_ada_boost_with_f1():
    """F1 validation of an AdaBoost classifier under every averaging
    mode."""
    model = AdaBoostClassifier()
    for average in ('micro', 'macro', 'weighted', None):
        Rf.f1_validation(model, average)
Exemple #22
0
def do_ada_boost_with_k():
    """Run k-fold validation for an AdaBoost classifier."""
    Rf.k_validation(AdaBoostClassifier())
Exemple #23
0
import Readfile
import Dictionarysearch
import Sentenceprocess

# Corpus files: tagged training data, an untagged test file, and its
# gold-standard tagged counterpart.
path_train = 'brown.train.tagged.txt'
path_test = 'brown.test.txt'
path_test_result = 'brown.test.tagged.txt'

# Build the tag dictionary from the training file.
ds = Dictionarysearch.Dictionarysearch()
sp = Sentenceprocess.Sentenceprocess()
r1 = Readfile.Readfile(path_train)
r1.sentence_process(r1.tag_index)
# NOTE(review): these files are never closed in the visible snippet;
# consider ``with open(...)`` blocks.
test_file = open(path_test)
test_line = []
test_tagged_file = open(path_test_result)
test_tagged_line = []
for line in test_file:
    test_line.append(line)
for line in test_tagged_file:
    test_tagged_line.append(line)

# Split both files' sentences into word lists.
word_array = sp.sentence2word(test_line)

word_tagged_array = sp.sentence2word(test_tagged_line)
index = 0
success = 0
total_unknown = 0

# Tag each test word with its most frequent training tag.
# NOTE(review): the snippet is truncated here -- ``result`` is computed but
# the accounting against index/success/total_unknown is not visible.
for word in word_array:

    result = (ds.find_most_frequent_tag(word, r1.tag_dictionary))
Exemple #24
0
def do_final_Knn():
    """Fit the final KNN model (default parameters) on the full training
    split and save it as "Knn"."""
    x_train, _, y_train, _, _ = Rf.read_data_final()
    model = KNeighborsClassifier()
    model.fit(x_train, y_train)
    Rf.save_model("Knn", model)
Exemple #25
0
def do_final_rf():
    """Fit the final random-forest model on the full training split and
    save it as "Random_Forest"."""
    x_train, _, y_train, _, _ = Rf.read_data_final()
    model = RandomForestClassifier()
    model.fit(x_train, y_train)
    Rf.save_model("Random_Forest", model)
import Readfile
import ProbCalculate

# Per-language training corpora plus the test file and its gold labels.
# NOTE(review): path_test / lang_test / path_res are unused in the visible
# snippet -- presumably consumed further down (the example is truncated).
path_en = 'EN.txt'
lang_en = 'English'
path_fr = 'FR.txt'
lang_fr = 'French'
path_gr = 'GR.txt'
lang_gr = 'German'
path_test = 'LangID.test.txt'
lang_test = 'English'
path_res = 'LangID.gold.txt'

# Same pipeline per language: tokenize, build the word dictionary,
# compute probabilities, then self-test the dictionary.
english_file = Readfile.Readfile(path_en, 'English')
english_file.change_str2word()
# print(english_file.word_file)
english_file.build_word_dic()
english_file.build_probability()
english_file.test_word_dictionary()

french_file = Readfile.Readfile(path_fr, 'French')
french_file.change_str2word()
# print(french_file.word_file)
french_file.build_word_dic()
french_file.build_probability()
french_file.test_word_dictionary()

german_file = Readfile.Readfile(path_gr, 'German')
german_file.change_str2word()
# print(german_file.word_file)
german_file.build_word_dic()
Exemple #27
0
import Camera
import SingleObject
import Readfile
import Shading
# Load the model geometry (points, polygons, polygon count) from a .d file.
Points1, Polygons1, numofPolygon1 = Readfile.readfile('D files/nteapot6.d.txt')
# Camera configuration: eye position, look-at reference point, and the
# d / h / f projection parameters (semantics defined by Camera.Camera --
# presumably near distance, window half-size and far distance; verify).
c_pos = [5, 5, 10]
p_ref = [0, 0, 0]
d = 20
h = 15
f = 80
c = Camera.Camera(c_pos, p_ref, d, h, f)
# Object initialization
object1 = SingleObject.SingleObject(Points1, Polygons1, [0, -1, 0],
                                    numofPolygon1)
# Pipeline: cull back faces (on by default), then transform
# world -> screen -> device coordinates.
object1.backfaceculling(c)
object1.WorldtoScreen(c)
object1.ScreentoDevices()
# Object's surface colour as RGB (values look 0-255 scaled -- confirm).
object1.color = [20, 200, 160]
Shading.Rendering.color = object1.color
# Light source configuration: position, intensity, and the ka / ks / kd
# coefficients consumed by the Shading module.
Shading.light = [30, 30, 30]
Shading.i_light = [0.8, 0.8, 0.8]
Shading.ka = 0.4
Shading.ks = 1
Shading.kd = 0.6
Shading.c_pos = c_pos
# Texture image (example truncated here)