Example #1
import numpy as np


# Readcsv, fold, train, test and checkerror are assumed to be defined in the
# surrounding module (not shown in this snippet).
def main():
    # 10-fold cross-validation: read the data, split it into train/test folds,
    # train the model, and print its error on the held-out fold.
    for kfold in range(10):
        inputdata, outputdata = Readcsv()
        trainin, trainout, testin, testout = fold(inputdata, outputdata)
        data = np.array(inputdata)   # array view of the full inputs (unused below)
        out = np.array(outputdata)   # array view of the full outputs (unused below)
        wout, biout, win, biin = train(trainin, trainout)
        target = test(wout, biout, win, biin, testin)
        print(checkerror(target, testout))
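
Example #1 leans on Readcsv(), fold(), train(), test() and checkerror() from its surrounding module, none of which are shown. As a self-contained illustration of the 10-fold splitting idea only, here is a minimal sketch; the iterate_folds() helper below is hypothetical and is not the project's fold() function, which may split the data differently.

import numpy as np


def iterate_folds(inputs, outputs, k=10):
    # Hypothetical illustration of a k-fold split; not the project's fold().
    inputs = np.asarray(inputs)
    outputs = np.asarray(outputs)
    folds = np.array_split(np.arange(len(inputs)), k)
    for i in range(k):
        test_idx = folds[i]
        train_idx = np.concatenate([folds[j] for j in range(k) if j != i])
        yield (inputs[train_idx], outputs[train_idx],
               inputs[test_idx], outputs[test_idx])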
Example #2
    def dealMessage(self):
        # Show the user's message in the chat list, then build and show a reply.
        Msg = self.edit.toPlainText()
        self.edit.clear()
        message = messageW(Msg, 'user')
        item = QListWidgetItem(self.list)
        message.setFixedWidth(self.width() - 25)
        item.setSizeHint(message.fontRect())
        self.list.setItemWidget(item, message)
        self.list.scrollToBottom()

        # --- Build the reply ---------------------------------------- #
        returnMsg = ""

        # Load the disease -> department knowledge base.
        with open("./data/9939jbks.json", "r", encoding="utf-8") as file_ks:
            ks = json.load(file_ks)

        # Exact match: if the message is a known disease name ("病名"),
        # answer with its registration departments ("挂号科室").
        ifok = True
        str_msg = Msg
        for ele in ks:
            if ele["病名"] == str_msg:
                for e in ele["挂号科室"]:
                    returnMsg = returnMsg + e + " "
                ifok = False
                break

        # No exact match: fall back to the pre-trained decision tree.
        if ifok:
            file_tree = "./tree/decision_tree.json"
            tree_test = preprocessing.Decision_Tree(ifload=True,
                                                    file_name=file_tree)
            returnMsg = preprocessing.test(str_msg, tree_test)
        # ------------------------------------------------------------- #

        # Append the reply to the chat list as a 'helper' message bubble.
        message = messageW(returnMsg, 'helper')
        item = QListWidgetItem(self.list)
        message.setFixedWidth(self.width() - 25)
        item.setSizeHint(message.fontRect())
        self.list.setItemWidget(item, message)
        self.list.scrollToBottom()
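
The lookup in dealMessage() implies a particular shape for ./data/9939jbks.json: a list of records carrying a 病名 (disease name) string and a 挂号科室 (registration departments) list. The entry below is a hypothetical sketch of that assumed layout, not the contents of the real data file.

# Hypothetical knowledge-base entry matching the keys read in dealMessage();
# the real ./data/9939jbks.json is not shown in this snippet.
example_ks = [
    {
        "病名": "<disease name>",                          # compared against the user's message
        "挂号科室": ["<department 1>", "<department 2>"],  # joined into the reply
    },
]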
Example #3
import arff
import numpy as np
import math
import random
from preprocessing import preprocessing, getoutputData, test, train


# Open in text mode: liac-arff parses the file as lines of str.
dataset = arff.load(open('Autism-Child-Data.arff', 'r'))
data = np.array(dataset['data'])

# Input array
x = preprocessing(data, 21)
# Training split and its output data
y = train(x)
o = getoutputData(x, 73)
# Test split and its output data
l = test(x)
u = getoutputData(x, 219)


def entropy():
    # Placeholder: the entropy computation is not implemented yet.
    kuy = math.log2(10)
    return 0
Example #4
# time, numpy (np) and preprocessing are presumably imported, and df, projects,
# df_classes and df_items loaded, earlier in the script (not shown in this snippet).
print('Building Tokenizer...')
tokenizer = preprocessing.get_tokenizer(df)

# Accumulated scores, labels and correct-target count across all projects.
score = np.zeros(shape=(0))
label = np.zeros(shape=(0))
target_correct = 0

# Leave-one-project-out: train on all other projects, test on the held-out one.
for project in projects:
    print('*' * 80)
    print(project)
    # Train on every project except the held-out one.
    ss = time.time()
    models = preprocessing.train(df[df.projectname != project], tokenizer,
                                 project)
    print('###########################', time.time() - ss)  # training time (s)

    # models = preprocessing.load_models(project)  # alternative: reuse saved models
    # Evaluate on the held-out project.
    ss = time.time()
    tscore, tlabel, t_target_correct = preprocessing.test(
        df_classes, df_items[df_items.projectname == project], tokenizer,
        models)
    print('###########################', time.time() - ss)  # testing time (s)
    score = np.concatenate((score, tscore.reshape(-1)), axis=0)
    label = np.concatenate((label, tlabel.reshape(-1)), axis=0)
    target_correct += t_target_correct

print('*' * 80)
print('Final')
preprocessing.eval(score, label, target_correct)
Example #5
import arff
import numpy as np
from preprocessing import preprocessing, getoutputData, test, train

# Open in text mode: liac-arff parses the file as lines of str.
dataset = arff.load(open('Autism-Child-Data.arff', 'r'))
data = np.array(dataset['data'])
X = preprocessing(data, 21)


def sigmoid(x, deriv=False):
    # With deriv=True, x is expected to already be a sigmoid output,
    # so the derivative is x * (1 - x).
    if deriv:
        return x * (1 - x)
    return 1 / (1 + np.exp(-x))


y = train(X)
# y = [[float(n) for n in m] for m in y]
o = getoutputData(X, 10)
z = test(X)
p = getoutputData(z, 282)

# Randomly initialise the weights in [-1, 1).
weight_input = 2 * np.random.random((17, 5)) - 1
weight_hidden = 2 * np.random.random((6, 1)) - 1

print(o)
learning_rate = [[0.01]]

print("Start : ")

# Training loop: repeated passes over the 282 training samples.
for i in range(1, 1500):
    for j in range(282):
        # Forward pass through the hidden layer.
        input_layer = np.array([y[j]])
        hidden_layer_out = sigmoid(np.dot(input_layer, weight_input))