def test_process(self):
    iris_mat_train, iris_label_train = dataset.load_iris("sample_data", "training", one_hot=True)
    iris_mat_test, iris_label_test = dataset.load_iris("sample_data", "testing", one_hot=True)
    linear_reg = LinearRegression(iris_mat_train, iris_label_train)
    linear_reg.fit(lr=0.0001, epoch=1000, batch_size=20)
    error_rate = autotest.eval_predict(linear_reg, iris_mat_test, iris_label_test,
                                       self.logging, one_hot=True)
    self.tlog("iris predict (with linear regression) error rate :" + str(error_rate))
def test_process(self):
    # Classify the normalized iris features (shared via test-global values)
    # with a 3-nearest-neighbour, Euclidean-distance model.
    normed_dmat_train = self.get_global_value('normed_iris_mat_train')
    normed_dmat_test = self.get_global_value('normed_iris_mat_test')
    dlabel_train = self.get_global_value('iris_label_train')
    dlabel_test = self.get_global_value('iris_label_test')
    knn_iris = KNN(normed_dmat_train, dlabel_train, 3, 'euclidean')
    error_rate = autotest.eval_predict(knn_iris, normed_dmat_test, dlabel_test, self.logging)
    self.tlog("iris predict (with basic knn, normalized) error rate : " + str(error_rate))
def test_process(self):
    lense_mat_train, lense_label_train, lense_mat_test, lense_label_test = \
        fs.tsv_loader("sample_data/lense/lense.tsv", 0.3)
    dtree_lense = DecisionTreeID3(lense_mat_train, lense_label_train)
    tree_structure = dtree_lense.build()
    self.tlog("Tree structure : " + str(tree_structure))
    error_rate = autotest.eval_predict(dtree_lense, lense_mat_test, lense_label_test, self.logging)
    self.tlog("lense predict (with decision tree) error rate : " + str(error_rate))
def test_process(self):
    # Build a Wi-Fi area classifier from raw scan records: keep the
    # MAJOR_AP_COUNT strongest access points of each scan, turn them into
    # fixed-length RSSI vectors, then train Gaussian naive Bayes on them.
    MAJOR_AP_COUNT = 17
    BAD_SIGNAL = -100
    with open("sample_data/wrm/wrm.json.dat") as areaf:
        area_json_list = areaf.readlines()
    area_set = {}
    ap_set = {}
    for aobj in area_json_list:
        area = json.loads(aobj)
        label = area["areaID"]
        aplist = area["apList"]
        # Strongest signal first.
        aplist.sort(key=lambda ap: ap['rssi'], reverse=True)
        for ap in aplist[:MAJOR_AP_COUNT]:
            ap_set[ap['bssid']] = 1
        area_set.setdefault(label, []).append(aplist[:MAJOR_AP_COUNT])
    # Fixed column order for the RSSI feature vectors.
    ap_vector_column = list(ap_set.keys())
    train_mat = []
    train_label = []
    test_mat = []
    test_label = []
    count = 0
    for label in area_set:
        for aps in area_set[label]:
            # Access points missing from a scan keep the BAD_SIGNAL value.
            ap_vector = tile(BAD_SIGNAL, len(ap_vector_column))
            for ap in aps:
                ap_vector[ap_vector_column.index(ap['bssid'])] = ap['rssi']
            count += 1
            # Every tenth sample goes into the test set.
            if count % 10 == 0:
                test_label.append(label)
                test_mat.append(ap_vector)
            else:
                train_label.append(label)
                train_mat.append(ap_vector)
    gnb = GaussianNaiveBayes(train_mat, train_label)
    gnb.fit()
    error_rate = autotest.eval_predict(gnb, test_mat, test_label, self.logging)
    self.tlog("rssi predict (with GaussianNaiveBayes) error rate : " + str(error_rate))
def test_process(self):
    iris_mat_train, iris_label_train = dataset.load_iris("sample_data", "training")
    iris_mat_test, iris_label_test = dataset.load_iris("sample_data", "testing")
    knn = KNN(iris_mat_train, iris_label_train, 3, 'manhattan')
    error_rate = autotest.eval_predict(knn, iris_mat_test, iris_label_test, self.logging)
    self.tlog("iris predict (with basic knn) error rate :" + str(error_rate))
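# For reference, a minimal sketch of the k-nearest-neighbour rule the KNN-based
# tests in this section exercise. It is illustrative only and independent of the
# library's KNN class: knn_predict, its parameters and the two distance options
# below are assumptions, not the library's API.
import numpy as np
from collections import Counter

def knn_predict(train_mat, train_label, sample, k=3, metric='manhattan'):
    """Classify one sample by majority vote among its k nearest neighbours."""
    train = np.asarray(train_mat, dtype=float)
    diff = train - np.asarray(sample, dtype=float)
    if metric == 'manhattan':
        dist = np.abs(diff).sum(axis=1)          # L1 distance
    else:
        dist = np.sqrt((diff ** 2).sum(axis=1))  # L2 (Euclidean) distance
    nearest = np.argsort(dist)[:k]               # indices of the k closest rows
    votes = Counter(train_label[i] for i in nearest)
    return votes.most_common(1)[0][0]            # most frequent neighbour label

# Example: knn_predict([[0, 0], [1, 1]], ['a', 'b'], [0.9, 0.9], k=1) -> 'b'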
def test_process(self):
    dg_mat_train, dg_label_train = dataset.load_mnist("sample_data", "training")
    dg_mat_test, dg_label_test = dataset.load_mnist("sample_data", "testing")
    knn_digit = KNN(dg_mat_train, dg_label_train, 10, 'euclidean')
    error_rate = autotest.eval_predict(knn_digit, dg_mat_test, dg_label_test, self.logging)
    self.tlog("digit predict (with basic knn) error rate :" + str(error_rate))
def test_process(self):
    dg_mat_train, dg_label_train = dataset.load_mnist("sample_data", "training", one_hot=True)
    dg_mat_test, dg_label_test = dataset.load_mnist("sample_data", "testing", one_hot=True)
    fnn = FNN(dg_mat_train, dg_label_train, [400, 100])
    fnn.fit(lr=0.01, epoch=1000, err_th=0.00001, batch_size=100)
    error_rate = autotest.eval_predict(fnn, dg_mat_test, dg_label_test, self.logging, one_hot=True)
    self.tlog("digit predict (with fnn) error rate :" + str(error_rate))
def test_process(self):
    iris_mat_train, iris_label_train = dataset.load_iris("sample_data", "training", one_hot=True)
    iris_mat_test, iris_label_test = dataset.load_iris("sample_data", "testing", one_hot=True)
    fnn = FNN(iris_mat_train, iris_label_train, [2])
    fnn.fit(lr=0.001, epoch=4000, err_th=0.00001, batch_size=30)
    error_rate = autotest.eval_predict(fnn, iris_mat_test, iris_label_test, self.logging, one_hot=True)
    self.tlog("iris predict (with fnn) error rate :" + str(error_rate))
def test_process(self):
    iris_mat_train, iris_label_train = dataset.load_iris("sample_data", "training", one_hot=True)
    iris_mat_test, iris_label_test = dataset.load_iris("sample_data", "testing", one_hot=True)
    logistic_reg = LogisticRegression(iris_mat_train, iris_label_train)
    logistic_reg.fit(lr=0.001, epoch=2000, batch_size=30)
    error_rate = autotest.eval_predict(logistic_reg, iris_mat_test, iris_label_test,
                                       self.logging, one_hot=True)
    self.tlog("iris predict (with logistic regression) error rate :" + str(error_rate))
def test_process(self):
    dg_mat_train, dg_label_train = dataset.load_mnist("sample_data", "training", one_hot=True)
    dg_mat_test, dg_label_test = dataset.load_mnist("sample_data", "testing", one_hot=True)
    logistic_reg = LogisticRegression(dg_mat_train, dg_label_train)
    logistic_reg.fit(lr=0.0001, epoch=1000, batch_size=100)
    error_rate = autotest.eval_predict(logistic_reg, dg_mat_test, dg_label_test,
                                       self.logging, one_hot=True)
    self.tlog("digit predict (with logistic regression) error rate :" + str(error_rate))
def test_process(self): nlp_eng = nlp("eng") email_data_file = "sample_data/email/email.tsv" emailmat_train, emaillabel_train, voca, emailmat_test, emaillabel_test = fs.tsv_loader_with_nlp( email_data_file, 0.3, nlp_eng ) self.tlog(voca) email_nbayes = NaiveBayes(emailmat_train, emaillabel_train) email_nbayes.fit() error_rate = autotest.eval_predict(email_nbayes, emailmat_test, emaillabel_test, self.logging) self.tlog("spam-mail predict (with NaiveBayes) error rate : " + str(error_rate)) assert error_rate <= 0.1
def test_process(self): nlp_eng = nlp("eng_lower") email_data_file = "sample_data/email/email.tsv" emailmat_train, emaillabel_train, voca, emailmat_test, emaillabel_test \ = fs.tsv_loader_with_nlp(email_data_file, 0.4, nlp_eng) self.tlog(voca) email_nbayes = NaiveBayes(emailmat_train, emaillabel_train) email_nbayes.fit() error_rate = autotest.eval_predict(email_nbayes, emailmat_test, emaillabel_test, self.logging) self.tlog("spam-mail predict (with NaiveBayes) error rate : " + str(error_rate)) assert error_rate <= 0.1
def test_process(self):
    # Classify hand-crafted signal-strength samples (three RSSI readings each)
    # into classes A, B and C with Gaussian naive Bayes.
    train_mat = [
        [-65, -55, -42], [-20, -59, -71], [-43, -49, -69],
        [-61, -30, -74], [-79, -81, -40], [-71, -57, -24],
        [-67, -19, -58], [-57, -73, -83], [-68, -74, -59],
        [-80, -85, -79],
    ]
    train_label = ['B', 'A', 'A', 'A', 'B', 'B', 'A', 'C', 'C', 'C']
    test_mat = [
        [-45, -47, -74], [-77, -69, -25], [-64, -71, -59],
        [-85, -85, -25], [-85, -85, -85],
    ]
    test_label = ['A', 'B', 'C', 'B', 'C']
    gnb = GaussianNaiveBayes(train_mat, train_label)
    gnb.fit()
    error_rate = autotest.eval_predict(gnb, test_mat, test_label, self.logging)
    self.tlog("strength of signal predict error rate : " + str(error_rate))
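# For reference, a minimal sketch of the Gaussian naive Bayes computation the
# GaussianNaiveBayes tests above rely on: estimate per-class priors, feature
# means and variances, then score a sample by its Gaussian log-likelihood plus
# the log prior. Illustrative only; gnb_fit and gnb_predict are assumptions,
# not the library's GaussianNaiveBayes API.
import math
from collections import defaultdict

def gnb_fit(train_mat, train_label):
    """Return {class: (prior, feature means, feature variances)}."""
    groups = defaultdict(list)
    for row, label in zip(train_mat, train_label):
        groups[label].append(row)
    model = {}
    for label, rows in groups.items():
        n = len(rows)
        means = [sum(col) / n for col in zip(*rows)]
        varis = [sum((x - m) ** 2 for x in col) / n + 1e-9  # small floor avoids zero variance
                 for col, m in zip(zip(*rows), means)]
        model[label] = (n / len(train_mat), means, varis)
    return model

def gnb_predict(model, sample):
    """Pick the class with the highest log posterior."""
    def log_post(prior, means, varis):
        ll = math.log(prior)
        for x, m, v in zip(sample, means, varis):
            ll += -0.5 * math.log(2 * math.pi * v) - (x - m) ** 2 / (2 * v)
        return ll
    return max(model, key=lambda c: log_post(*model[c]))

# Usage: model = gnb_fit(train_mat, train_label); gnb_predict(model, test_mat[0])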
def test_process(self):
    dg_mat_train, dg_label_train = dataset.load_mnist("sample_data", "training", one_hot=True)
    dg_mat_test, dg_label_test = dataset.load_mnist("sample_data", "testing", one_hot=True)
    svc = SVC(dg_mat_train, dg_label_train)
    svc.fit(C=1.5, toler=0.0001, epoch=1000, kernel="RBF", kernel_params={"gamma": 0.7})
    error_rate = autotest.eval_predict(svc, dg_mat_test, dg_label_test, self.logging, one_hot=True)
    self.tlog("digit predict (with svc) error rate :" + str(error_rate))
def test_process(self):
    iris_mat_train, iris_label_train = dataset.load_iris("sample_data", "training", one_hot=True)
    iris_mat_test, iris_label_test = dataset.load_iris("sample_data", "testing", one_hot=True)
    svc = SVC(iris_mat_train, iris_label_train)
    svc.fit(C=1.5, toler=0.0001, epoch=1000, kernel="Polynomial", kernel_params={"degree": 3})
    error_rate = autotest.eval_predict(svc, iris_mat_test, iris_label_test, self.logging, one_hot=True)
    self.tlog("iris predict (with svc) error rate :" + str(error_rate))
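# For reference, the standard forms of the two kernels the SVC tests above
# configure via kernel/kernel_params, written out explicitly. Illustrative
# only: the library's internal kernel implementation is not shown here, and
# the coef0 term of the polynomial kernel is an assumption.
import numpy as np

def rbf_kernel(x, y, gamma=0.7):
    """K(x, y) = exp(-gamma * ||x - y||^2)."""
    d = np.asarray(x, dtype=float) - np.asarray(y, dtype=float)
    return float(np.exp(-gamma * np.dot(d, d)))

def polynomial_kernel(x, y, degree=3, coef0=1.0):
    """K(x, y) = (x . y + coef0) ** degree."""
    x = np.asarray(x, dtype=float)
    y = np.asarray(y, dtype=float)
    return float((np.dot(x, y) + coef0) ** degree)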