Example #1
    def connectIP(self):

        self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            self.s.bind((self.exo1.pcIP, self.exo1.ipPort))
        except socket.error:
            with self.recDataLock:
                self.recData.put('IP Bind Failed\n')
        self.s.listen(5)
        conIP, addr = self.s.accept()
        with self.recDataLock:
            self.recData.put('TCP/IP Bind success\n')

        while self.tcpipConnect.is_set():
            data = conIP.recv(1024)
            print(data)
            cmdList = []
            dp.comSep(data.decode('utf-8'), cmdList)
            for cmd in cmdList:
                dataList = []
                dp.dataSep(cmd, dataList)
                with self.plotLock:
                    self.plotData.get()
                    self.plotData.put(dataList)

            time.sleep(0.05)
Example #2
def main():

    global G_igraph, G_nx, nodes_vector, aca_graph

    G_igraph, G_nx, nodes_vector = dp.generate_data()

    aca_graph = dp.separate_data_by_academy(nodes_vector, G_igraph)
Example #3
def main():
    if not os.path.exists("train_set.npy"):
        # Extract samples from the dataset and split them into training and test sets
        trainData, testData = DataProcess.getDatasetFromFile(Path)
        # Feature extraction and vocabulary generation
        vocabulary = Vocabulary.Vocabulary(numOfBag)
        vocabulary.generateBoW("feature_set.npy", random_state)
        # vocabulary.getBow("Bow.npy")
        # Image representation: generate image feature vectors with the SPM method
        trainSet, trainlabels = vocabulary.Imginfo2SVMdata(trainData)
        testSet, testlabels = vocabulary.Imginfo2SVMdata(testData)
        np.save("train_set.npy", trainSet)
        np.save("train_label.npy", trainlabels)
        np.save("test_set.npy", testSet)
        np.save("test_label.npy", testlabels)
    else:
        trainSet = np.load("train_set.npy")
        trainlabels = np.load("train_label.npy")
        testSet = np.load("test_set.npy")
        testlabels = np.load("test_label.npy")

    # Run classification: predict the categories of the test-set images with an SVM model
    svm = ClassifierKernel.trainSVM(trainSet, trainlabels)
    result = ClassifierKernel.predict(trainSet, svm)
    testResult = ClassifierKernel.predict(testSet, svm)

    # Evaluate the predictions, generate classification reports, and output the confusion matrix
    trainReport, _ = ClassifierKernel.evaluate(result, trainlabels)
    testReport, cm = ClassifierKernel.evaluate(testResult, testlabels)

    DataProcess.plotCM(targetName, cm, "confusion_matrix.png")
    print("训练集混淆矩阵:")
    print(trainReport)
    print("测试集混淆矩阵:")
    print(testReport)
Example #4
def main():

    global G_igraph, G_nx, nodes_vector, aca_graph

    G_igraph, G_nx, nodes_vector = dp.generate_data()

    # TODO: read the uploaded file's graph information to obtain (G_igraph, G_nx) and nodes_vector

    aca_graph = dp.separate_data_by_academy(nodes_vector, G_igraph)
Example #5
def tf_model_test(path: str):
    model = keras.models.load_model(path)
    model.summary()
    for i in range(10):
        DataProcess.divideTrainAndVerify('comb_rate.csv', 'train.csv',
                                         'verify.csv', 0.75)
        (verify_data,
         verify_labels) = DataProcess.extractFlagForRate("verify.csv")
        model.evaluate(verify_data, verify_labels, verbose=2)
Example #6
def run_epoch(sess, model, data, batch_size, writer, saver):
    global global_step
    flag = False

    test_set_size = int(len(data) // 10)
    train_data = data[:len(data) - test_set_size]
    test_data = data[len(data) - test_set_size:]
    train_x = train_data[:, :-1]
    train_label = DataProcess.to_one_hot(train_data[:, -1], 4)
    test_x = test_data[:, :-1]
    test_label = DataProcess.to_one_hot(test_data[:, -1], 4)

    #train

    for i in xrange((len(data) - test_set_size) // batch_size):
        global_step += 1
        feed_dic = {}
        feed_dic[model.data_in.name] = train_x[i * batch_size:(i + 1) *
                                               batch_size]
        feed_dic[model.true_label.name] = train_label[i * batch_size:(i + 1) *
                                                      batch_size]
        _, accu, los, merged_summ, is_nan = sess.run([
            model.train_op, model.accurate, model.loss, model.merged_summary,
            model.loss_is_nan
        ], feed_dic)
        if is_nan:
            print(train_x[i * batch_size:(i + 1) * batch_size])
            print(train_label[i * batch_size:(i + 1) * batch_size])
            exit()
        if global_step % 1000 == 0:
            flag = True
            print("training:"),
            print("los:%.7f, accu:%.7f" % (los, accu))
            writer.add_summary(merged_summ, global_step // 1000)
            saver.save(sess, "checkpoint_rnn/modelRNN.ckpt")
    #test
    test_losses = []
    test_accuracy = []
    #*****************************************
    summ = np.zeros([4, 4], dtype=np.int16)
    for j in range(test_set_size // batch_size):
        feed_dic = {}
        feed_dic[model.data_in.name] = test_x[j * batch_size:(j + 1) *
                                              batch_size]
        feed_dic[model.true_label.name] = test_label[j * batch_size:(j + 1) *
                                                     batch_size]
        actual, predict, accu, los = sess.run(
            [model.true_in, model.predict, model.accurate, model.loss], feed_dic)
        test_accuracy.append(accu)
        test_losses.append(los)
        for x, y in zip(actual, predict):
            summ[x, y] += 1
    print(summ)
    #*****************************************
    if flag:
        print("test:"),
        print("los:%.2f, accu:%.2f" %
              (np.mean(test_losses), np.mean(test_accuracy)))
        flag = False
Example #7
def run_epoch(sess, model, data, batch_size, writer, saver):
    global global_step
    flag = False
    test_set_size = int(len(data) / 10)
    train_data = data[:len(data) - test_set_size]
    test_data = data[len(data) - test_set_size:]
    train_x = train_data[:, :-1]
    train_label = DataProcess.to_one_hot(train_data[:, -1], 4)
    test_x = test_data[:, :-1]
    test_label = DataProcess.to_one_hot(test_data[:, -1], 4)

    # train

    for i in range(int((len(data) - test_set_size) / batch_size)):
        global_step += 1
        feed_dic = {}
        feed_dic[model.data_in.name] = train_x[i * batch_size:(i + 1) *
                                               batch_size]
        feed_dic[model.true_label.name] = train_label[i * batch_size:(i + 1) *
                                                      batch_size]
        _, accu, los, merged_summ, is_nan, fc1_, grads_ = sess.run([
            model.train_op, model.accurate, model.loss, model.merged_summary,
            model.loss_is_nan, model.fc1, model.grads
        ], feed_dic)
        if is_nan:
            print(train_x[i * batch_size:(i + 1) * batch_size])
            print(train_label[i * batch_size:(i + 1) * batch_size])
            exit()
        if global_step % 10000 == 0:
            flag = True
            # print "grads", grads_[0:2]
            # print "fc1:",fc1_[0:4,:]
            # print "output_",output_[0:4,:]
            # print "relu_output",relu_output_[0:4,:]
            print("training:   "),
            print("los:%.2f, accu:%.2f" % (los, accu))
            writer.add_summary(merged_summ, global_step // 10000)
            saver.save(sess, "checkpoint_cnn_v1/modelCNN")
    # test
    test_losses = []
    test_accuracy = []
    for j in range(int(test_set_size / batch_size)):
        feed_dic = {}
        feed_dic[model.data_in.name] = test_x[j * batch_size:(j + 1) *
                                              batch_size]
        feed_dic[model.true_label.name] = test_label[j * batch_size:(j + 1) *
                                                     batch_size]
        accu, los = sess.run([model.accurate, model.loss], feed_dic)
        test_accuracy.append(accu)
        test_losses.append(los)
    if flag:
        print("test:   "),
        print("los:%.2f, accu:%.2f" %
              (np.mean(test_losses), np.mean(test_accuracy)))
        flag = False
Example #8
def learning(url):
    DataProcess.get_csv()  # fetch the data on the first run
    yq = ML2.SVM()
    yq.testurl = url
    yq.Train()
    yq_res = yq.predict_single()[0]

    ML.textClassifier()  # train the classifier on the first run
    t = ML2.textPredictor()
    t.urlpre = url
    t.crawl()
    t.predict()
    text_res = t.get_result()
    return {'porn_site': text_res[0],
            'gaming_site': text_res[1],
            'phishing_site': yq_res}
Example #9
def closest_group(req_id):
    '''
    Handle academy clustering (cluster coefficient)

    ----------
    parameter
      req_id: request ID

    return
      the rendered template
    '''

    global G_igraph, G_nx, nodes_vector, aca_graph, data

    paramas = request.args  # GET request parameters

    if req_id == 0:  # render the coefficient page

        return render_template('new_coefficience.html')

    elif req_id == 1:  # two academies selected

        # parameters: academy IDs
        aca1_id = int(paramas['acaId1']) - 1
        aca2_id = int(paramas['acaId2']) - 1

        # compute the cluster coefficient of each academy
        aca1_graph_json = dp.graph_json(
            nx.DiGraph(aca_graph[aca1_id].get_edgelist()), nodes_vector)

        aca1_coefficient = c.cluster_coefficient_calculation(
            nx.DiGraph(
                aca_graph[aca1_id].
                get_edgelist()).to_undirected(reciprocal=False))

        aca2_graph_json = dp.graph_json(
            nx.DiGraph(aca_graph[aca2_id].get_edgelist()), nodes_vector)

        aca2_coefficient = c.cluster_coefficient_calculation(
            nx.DiGraph(
                aca_graph[aca2_id].
                get_edgelist()).to_undirected(reciprocal=False))

        # return the data in the format required by the request
        data = {'AcademyGraph1': aca1_graph_json,
                'AcademyGraph2': aca2_graph_json,
                'AcademyCoefficient1': aca1_coefficient,
                'AcademyCoefficient2': aca2_coefficient}

        return make_response(json.dumps(data))
Example #10
 def handleDataQueue(self):
     if self.runFlag:
         data_timer = Timer(0.1,self.handleDataQueue)
 #         print "Enter HandleDataQueue____________________"
         itemdata = None
         try:
             itemdata = self.dataQueue.get_nowait()
         except Exception as e:
             print "handleDataQueue Error: ", e
         if itemdata:
 #             print itemdata
             DataProcess.dataEntrance(itemdata,self.mysqlConnect_data)
 #             self.addTask(time.time()+2, PumpCheck("1#","2#","3#"))
         data_timer.start()
Example #11
 def Initialization(self, init):
     contents = init.split("|")
     version = contents[1]
     BestHeight = contents[2]
     if contents[4]:  # the client takes part in mining
         DataProcess.addIp(contents[4])
     db = Conlmdb()
     if version == Utils().blockversion:
         # check that the version numbers match
         if BestHeight >= str(db.dbsize()):
             return "Lastest"
         else:
             return str(db.dbsize())
     else:
         return None
Example #12
    def setDfMonthData(self, startDate=None, endDate=None):
        dpMonthObj = dp.DataProcess(self.code, self.name, period=dp.MONTH)
        dpMonthObj.readData()
        self.dpMonthObj = dpMonthObj
        if (os.path.exists(dpMonthObj.dataGenCsvFile)):
            self.dfMonthGenData = pd.read_csv(dpMonthObj.dataGenCsvFile,
                                              encoding=sd.UTF_8,
                                              dtype={'code': str})
            self.dfMonthFilterData = self.filterData(self.dfMonthGenData,
                                                     startDate, endDate)
        else:
            print(
                '[Function:%s line:%s stock:%s] Error: File %s does not exist' %
                (self.setDfMonthData.__name__, sys._getframe().f_lineno,
                 self.code, dpMonthObj.dataGenCsvFile))
            sys.exit()

        if (os.path.exists(dpMonthObj.signalReportFile)):
            self.dfMonthSignalData = pd.read_csv(dpMonthObj.signalReportFile,
                                                 encoding=sd.UTF_8,
                                                 dtype={'code': str})
        else:
            print(
                '[Function:%s line:%s stock:%s] Error: File %s does not exist' %
                (self.setDfMonthData.__name__, sys._getframe().f_lineno,
                 self.code, dpMonthObj.signalReportFile))
            sys.exit()

        self.reportPath = dpMonthObj.dataPath + self.code + '_exchange_report.csv'
Example #13
def getComparePlayer():
    json_data = request.json
    # json_data = request.get_json()
    name = json_data["name"]
    team = json_data["team"]
    players = DataProcess.getPlayerDetails(name, team)
    return json.dumps(players, indent=2)
Example #14
def getPositionPlayers():
    json_data = request.json
    # json_data = request.get_json()
    name = json_data["name"]
    positionNo = json_data["positionNo"]
    players = DataProcess.positionSort(ws, positionNo, "Position No")
    return json.dumps(players, indent=2)
Example #15
def app_principal():
    """ This is the main method of application.
     It start this app and control it
    """
    start = dp.DataProcess()
    t_a, h_a, h_s = start.data_in()
    start.define_disease_table_data(t_a, h_a, h_s)
Example #16
def geolfm1(A, X, epsilon, n):
    (p, q, t) = Initlfm(A, n)
    lam1 = 0.05
    alpha = 0.05
    ep = 100
    error = 0
    T = DataProcess.Dic2Array(t)
    while ep > epsilon:
        lasterror = error
        error = 0
        for u, i, rui in A:
            # update p
            pui = Predict.Predict(u, i, p, q) - rui
            #number
            Pu = np.array(p[u])
            #get the u_th row in p
            Qi = np.array(q[i])
            Xi = np.array(X[i])
            Qp = Predict.PredictF(Pu, T) - Xi  #vector
            QpT = Predict.PredictF(Qp, T.T)
            p[u] = Pu - alpha * pui * Qi - lam1 * alpha * QpT - lam1 * alpha * Pu
            #update q
            q[i] = Qi - alpha * pui * Pu - lam1 * alpha * Qi
            #update t
            QpPu = Predict.cal(Qp, Pu)
            T = T - alpha * lam1 * QpPu - lam1 * alpha * T
            error = error + np.abs(Predict.Predict(u, i, p, q) - rui)
        ep = np.abs(lasterror - error)
        #       print "error"+str(error)
        alpha *= 0.9
    return (p, q)
Example #17
def student_equivalence(req_id):

    global G_igraph, G_nx, nodes_vector, data
    paramas = request.args

    if req_id == 0:

        return render_template('/new_equivalence.html')

    elif req_id == 1:
        stu_id = int(paramas['studentId'])

        graph_json = dp.graph_json(G_nx, nodes_vector)  # graph information

        G = G_nx.to_undirected(reciprocal=False)

        matrix = nx.to_numpy_matrix(G)

        # compute struct-cosine and regular-cosine separately and store the results in a dict
        array1 = c.select_similar(nodes_vector, matrix, stu_id, 0)
        array2 = c.select_similar(nodes_vector, matrix, stu_id, 1)

        top_ten_list = {'structral': array1, 'regular': array2}

        data = {'Graph': graph_json,
                'Rank': top_ten_list}  # packaged as a dict

        return make_response(json.dumps(data))
Example #18
def sendBlock():
    # broadcast the block to every known peer
    iplist = DataProcess.getIp()
    for ip in iplist:
        HOST = ip
        PORT = Utils().sport
        reactor.connectTCP(HOST, PORT, BTSClntFactory())
    reactor.run()  # the Twisted reactor can only be started once, so run it after queuing all connections
Example #19
def tf_predict_na(path: str, saveTo: str):
    model = keras.models.load_model(path)
    model.summary()
    arr = DataProcess.ripLabels("alt_na_file.csv", ['flag', 'ID'])
    # arr = DataProcess.ripLabels("na_rate.csv", ['flag', 'ID'])
    # arr = model.predict(arr)
    arr = model.predict_classes(arr)
    np.savetxt(saveTo, arr, fmt='%f', delimiter=',')
Example #20
def run_epoch(sess, model, file_name, batch_size, writer, saver):
    global global_step
    flag = False
    data = DataProcess.data_process(file_name)
    test_set_size = int(len(data) // 10)
    train_data = data[:len(data) - test_set_size]
    test_data = data[len(data) - test_set_size:]
    train_x = train_data[:, :-1].reshape(-1, 4, 4)
    train_label = DataProcess.to_one_hot(train_data[:, -1], 4)
    test_x = test_data[:, :-1].reshape(-1, 4, 4)
    test_label = DataProcess.to_one_hot(test_data[:, -1], 4)

    #train

    for i in xrange((len(data) - test_set_size) // batch_size):
        global_step += 1
        feed_dic = {}
        feed_dic[model.data_in.name] = train_x[i * batch_size:(i + 1) *
                                               batch_size]
        feed_dic[model.true_label.name] = train_label[i * batch_size:(i + 1) *
                                                      batch_size]
        _, accu, los, merged_summ, is_nan = sess.run([
            model.train_op, model.accurate, model.loss, model.merged_summary,
            model.loss_is_nan
        ], feed_dic)
        if is_nan:
            print(train_x[i * batch_size:(i + 1) * batch_size])
            print(train_label[i * batch_size:(i + 1) * batch_size])
            exit()
        if global_step % 1000 == 0:
            flag = True
            print("training:")
            print("los:%.7f, accu:%.7f" % (los, accu))
            writer.add_summary(merged_summ, global_step)
            saver.save(sess, "checkpoint/model")
    #test

    feed_dic = {}
    feed_dic[model.data_in.name] = test_x
    feed_dic[model.true_label.name] = test_label
    accu, los = sess.run([model.accurate, model.loss], feed_dic)
    if flag:
        flag = False
        print("test:")
        print("los:%.7f, accu:%.7f" % (los, accu))
Example #21
File: __main__.py  Project: qihr/SaveMyGame
def main():
    print("Start Work")
    print('OS information: ' + str(platform.architecture()))
    FileHelper.Init()
    DataProcess.Init()
    t = threading.Thread(target=MonitorRunGame.Monitor)
    t.start()

    UIView.Init()
Example #22
def dataObtain(new_years):
    '''Read the link list for the given year from r'360doc-spider\\urlList\\' inside the
    project folder, download each page in turn, and pass the raw data to DataProcess,
    which saves it as txt files under r'360doc-spider\\rawFile\\xxxx\\'.'''

    global years, response, isFailed
    years = new_years
    try:
        # Look up the list for the relevant year in the urlFolder at this location
        urlFile = open(urlFolder + "urlList" + str(years) +
                       ".txt")  # files are named 'urlList' + year, in txt format
        targetDir_text = moduleAddress[:moduleAddress.find(
            r'\DataObtain.py')] + '\\' + 'rawFile' + '\\{}'.format(years)
        # local path: r'C:\Users\user\Desktop\FTC\2018个人项目\数据爬虫\360doc-spider\rawFile\{}'.format(years)
        targetDir_pics = moduleAddress[:moduleAddress.find(
            r'\DataObtain.py')] + '\\' + 'rawPics' + '\\{}'.format(years)
        # local path: r'C:\Users\user\Desktop\FTC\2018个人项目\数据爬虫\360doc-spider\rawPics\{}'.format(years)
        # create the subdirectories if they do not exist yet
        if not os.path.exists(os.path.dirname(targetDir_text)):
            print('The file save path does not exist; it will be created')
            os.mkdir(os.path.dirname(targetDir_text))
            print('Successfully created path', os.path.dirname(targetDir_text))
        if not os.path.exists(os.path.dirname(targetDir_pics)):
            print('The picture save path does not exist; it will be created')
            os.mkdir(os.path.dirname(targetDir_pics))
            print('Successfully created path', os.path.dirname(targetDir_pics))
        if not os.path.exists(targetDir_text):
            print('The per-year file path does not exist; it will be created')
            os.mkdir(targetDir_text)
            print('Successfully created path', targetDir_text)
        if not os.path.exists(targetDir_pics):
            print('The per-year picture path does not exist; it will be created')
            os.mkdir(targetDir_pics)
            print('Successfully created path', targetDir_pics)
    except FileNotFoundError:
        print('Unable to create path')
    for urlLine in urlFile.readlines():  # the loop ends once the file has been read through
        new_url = urlLine  # read the target URLs line by line from the urlList file
        request_initial(new_url)
        isFailed = DataProcess.dataProcess(requests(), targetDir_text,
                                           targetDir_pics)
        if isFailed == 1:
            continue
        elif isFailed == -1:
            print('Data processing failed because page_source could not be retrieved')
            break
    if isFailed == -1:
        print('Data fetching and processing failed')
        return -1
    elif isFailed == 1:
        print('Data fetching and processing succeeded')
        return 1
    else:
        print('Unknown error')
        return 0
Example #23
File: ATTCal.py  Project: FindBoat/ATTCal
def main():
    data = DataProcess.fetchData(__path__)
    users = []
    for dt in data:
        if len(dt) == 5:
            user = User(dt[0], dt[1], dt[2], dt[3], dt[4])
            users.append(user)
        else:
            print 'Data length error: %s' % str(dt)
    User.calPayment(users)
Example #24
    def dataReceived(self, data):
        """
        :param data:接收的区块链文本
        :接收文本--->处理文本--->发送文本
        发送给客户端最后的文本
        """
        if data:
            content = data.decode("utf-8")
            if content[-5:].strip() != "Finsh":
                self._data_buffer = self._data_buffer + content
            else:
                content = self._data_buffer + content[:-5]
                if content[0:14] == "Initialization":
                    CurrentHeight = DealData().Initialization(content)
                    if CurrentHeight == "Lastest":
                        self.senData(CurrentHeight)
                    elif CurrentHeight:
                        self.senData("CurrentHeight" + CurrentHeight)
                    else:
                        self.senData("False")

                elif content[0:2] == "OK":
                    recontent = DealData().getData(content)
                    btexts = b"Text" + Jsonstr.toStr(recontent).encode("utf-8")
                    self.senData(btexts)

                elif content[0:7] == "Getdata":
                    contents = content.split(":")
                    count = int(contents[1]) - Conlmdb().dbsize()
                    if count == 1:
                        if DataProcess.ConfirmB(contents[2],
                                                pids["Mine"]) == "restart":
                            startMine()
                    elif count >= 2:
                        DataProcess.SyncBlock(pids["Mine"], ipaddr=self.clnt)
                    """
                    处理接收的信息
                    1.验证交易
                    2.去掉内存池交易
                    3.继续转发
                    """
                else:
                    Log.Error("Server TSS Invalid data(" + self.clnt + ")")
Example #26
def main():
    # using Terminal to get parameters
    config = Config.Config_Table(Terminal_parser())

    # get standard train and test file
    if not os.path.exists(config.train) or not os.path.exists(config.test):
        DataProcess.get_standard_file(config)

    # get char vocab
    vocab, index = {}, 0
    vocab, index = DataProcess.get_vocab(config.train, vocab, index)
    vocab, _ = DataProcess.get_vocab(config.test, vocab, index)
    config.n_feature = len(vocab)

    # get standard train and test data
    train_data, train_target = DataProcess.get_data(config.train, vocab,
                                                    config.class_dict)
    test_data, test_target = DataProcess.get_data(config.test, vocab,
                                                  config.class_dict)

    # define HMM model
    model = Model.HMM_Cell(config)
    model.fit(train_data, train_target)
    label = model.predict_label(test_data)

    result_eval = Evaluation.Result_Eval(config)
    result_eval.eval_model(test_target, label)
Example #27
def preprocess(subjectID, redact=1000, rescale=False, scaler='minmax'):
    subj_filename = './PAMAP2_Dataset/Protocol/subject10' + str(
        subjectID) + '.dat'
    col_labels = DP.col_labels
    col_sublabels = DP.col_sublabels
    HR_lim = DP.HR_lim
    HR_rest = HR_lim[subjectID][0]
    HR_max = HR_lim[subjectID][1]
    X_std = np.empty(0)

    #generate dataframe from the raw data
    data = pd.read_csv(subj_filename, sep=' ', names=col_labels, header=None)

    #linear interpolate missing data
    data = data.interpolate(method='linear')

    #drop columns for orientation and acc6g
    data = pd.DataFrame(data, columns=col_sublabels)

    #convert to array
    data = np.array(data)

    #normalize heart rate
    data[:, 2] = DP.HR_norm(data[:, 2], HR_rest, HR_max)

    #Rescale:
    if rescale:
        if scaler == 'minmax':
            SS = MinMaxScaler(feature_range=(-1, 1), copy=True)
        else:
            SS = StandardScaler(copy=True, with_mean=True, with_std=True)
        data[:, 3:] = SS.fit_transform(data[:, 3:])
        X_std = np.copy(data[:, 3:])

    #computes timestamp indices where the activity changes, including 0 and l
    l = len(data)
    r = np.arange(l - 1) + 1
    split_ind = r[data[r, 1] != data[r - 1, 1]]
    split_ind = np.concatenate(([0], split_ind, [l]))

    #chop data into chunks of continuous time blocks with the same activity, also remove activity zero
    chunks = [
        data[split_ind[i]:split_ind[i + 1]] for i in range(len(split_ind) - 1)
        if data[split_ind[i], 1] != 0
    ]

    #drop the first and last n samples. Only keep redacted samples that
    #are of sufficient length

    chunks = [x[redact:-(redact + 1)] for x in chunks if len(x) > (2 * redact)]

    return X_std, chunks
Example #28
def tf_train():
    # auto-save the best checkpoint
    checkpoint = tf.keras.callbacks.ModelCheckpoint("checkPoint.h5",
                                                    monitor='val_accuracy',
                                                    verbose=1,
                                                    save_best_only=True)

    (train_data, train_labels) = DataProcess.extractFlagForRate("train.csv")
    (verify_data, verify_labels) = DataProcess.extractFlagForRate("verify.csv")
    verify_tuple = (verify_data, verify_labels)
    print(train_labels)
    model = tf_model()
    model.fit(train_data,
              train_labels,
              epochs=100,
              validation_data=verify_tuple,
              callbacks=[checkpoint])
    model.evaluate(verify_data, verify_labels, verbose=2)
    is_save = input("[INFO] Save model? [y]/n")
    if is_save in ['Y', 'y']:
        print("[INFO] Saving...")
        keras.models.save_model(model, "model3.h5")
        print("[INFO] Save Complete!")
Example #29
    def handleCloudMongoData(self, uri, hostPort, file_data, idCollec):

        client = pym.MongoClient(uri, hostPort, connectTimeoutMS=30000, socketTimeoutMS=None, socketKeepAlive=True)

        db = client.get_database()

        print" \n DB structure:  ", db
        print" \n DB name:  ", db.name
        print" \n Collection client: ", db.client

        if idCollec == 1:        # dadosNumSensores2
            dadosNumSensores2 = db['dadosNumSensores2']
            try:
                dadosNumSensores2.insert(file_data)
            except IOError:
                print("\n\n Erro de insersao de dados na colecao dadosNumSensores2")
                self.closeCMConection()
                exit(0)
        elif idCollec == 2:      # dadosVerificSensores2
            dadosVerificSensores2 = db['dadosVerificSensores2']
            try:
                dadosVerificSensores2.insert(file_data)
            except IOError:
                print("\n\n Erro de insersao de dados na colecao dadosVerificSensores2")
                self.closeCMConection()
                exit(0)
        elif idCollec == 3: # controle2
            controle2 = db['controle2']
            try:
                controle2.insert(file_data)
            except IOError:
                print("\n\n Erro de insersao de dados na colecao controle2")
                self.closeCMConection()
                exit(0)
        else:   # retrieve the response time: idCollec = 4
                # docum = db.get_collection('controle').find({"tempo":{$gte:5}})
            try:
                docum = db.get_collection('controle').find_one()
                tempoUser = docum.get('tempo')  # retrieve the value of the 'tempo' field
                dpo = Dp.DataProcess()
                dpo.tempoAtual = dpo.converterTempo(tempoUser)
            except IOError:
                print("\n\n Error selecting documents from the controle collection")
                self.closeCMConection()
                exit(0)
Example #30
def run_epoch(sess, model, data, batch_size,writer=None,saver=None):
    global global_step
    flag = False
    test_set_size = len(data)
    # train_data = data[:len(data) - test_set_size]
    # train_x = train_data[:, :-1]
    # train_label = DataProcess.to_one_hot(train_data[:, -1], 4)
    test_data = data[len(data) - test_set_size:]
    test_x = test_data[:, :-1]
    test_label = DataProcess.to_one_hot(test_data[:, -1], 4)

    # train

    # for i in range(int((len(data) - test_set_size) / batch_size)):
    #     global_step += 1
    #     feed_dic = {}
    #     feed_dic[model.data_in.name] = train_x[i * batch_size:(i + 1) * batch_size]
    #     feed_dic[model.true_label.name] = train_label[i * batch_size:(i + 1) * batch_size]
    #     _, accu, los, merged_summ, is_nan, fc1_, grads_= sess.run([model.train_op, model.accurate, model.loss,
    #                                                   model.merged_summary, model.loss_is_nan,
    #                                                        model.fc1,model.grads], feed_dic)
    #     if is_nan:
    #         print (train_x[i * batch_size:(i + 1) * batch_size])
    #         print (train_label[i * batch_size:(i + 1) * batch_size])
    #         exit()
    #     if global_step % 10000 == 0:
    #         flag = True
    #         # print "grads", grads_[0:2]
    #         # print "fc1:",fc1_[0:4,:]
    #         # print "output_",output_[0:4,:]
    #         # print "relu_output",relu_output_[0:4,:]
    #         print ("training:   "),
    #         print ("los:%.2f, accu:%.2f" % (los, accu))
    #         writer.add_summary(merged_summ, global_step // 10000)
    #         saver.save(sess, "checkpoint_cnn_v2/modelCNN")
    # test
    summ = np.zeros([4,4],dtype=np.int16)
    for j in range(int(test_set_size / batch_size)):
        feed_dic = {}
        feed_dic[model.data_in.name] = test_x[j * batch_size:(j + 1) * batch_size]
        feed_dic[model.true_label.name] = test_label[j * batch_size:(j + 1) * batch_size]
        actual, pred = sess.run([model.true_in, model.predict], feed_dic)
        for x,y in zip(actual,pred):
            summ[x,y] += 1

    print(summ)
Example #31
def fileCreater(dirPath):
    dirPath = getDirPath()
    i = None
    initialIndex = None
    outputFolder = r"\__outputs__"
    outputdirPath = dirPath + outputFolder

    if not os.path.isdir(outputdirPath):
        os.makedirs(outputdirPath)
    else:
        shutil.rmtree(outputdirPath)
        os.makedirs(outputdirPath)

    file_list = sorted(glob.glob(dirPath + '/*.txt'))
    # print("file_list",file_list)
    i = 0
    for filenamePath in file_list:
        initialIndex = ("00" + str(i + 1))[-2:] + "_"
        fileNameText = filenamePath.split("\\")[-1:][0]
        if (fileNameText == "env.txt"):
            pass
        else:
            arry = None
            with open(filenamePath, 'r') as fr:
                dp.setFileRawData(fr)
                # dp.startUnitChangeForCalcProgram()
                # dp.startCalcIonSatCurrent()
                dp.calcFloatVolt()
                # dp.showGraph()

            with open(outputdirPath + "\\" + initialIndex + fileNameText,
                      "w") as fw:
                dp.setFileName(initialIndex + fileNameText)
                arry = dp.getOutputData()
                fw.write(arry)
        i += 1
    # dp.showAllGraph()

    if (i == 0):
        root = tk.Tk()
        root.withdraw()  # keep the small root window from being shown
        shutil.rmtree(outputdirPath)
        messagebox.showerror("Selection error", "There are no text files that can be converted.")
    else:
        # done
        # openExplorer(outputdirPath)
        pass
Example #32
def main():
    # Input size of each steps
    input_size = args.num_joint * args.coord_dim

    # Loading data
    DataLoader = DataProcess.DataProcess(
        path=args.data_path,
        batch_size=args.batch_size,
        num_joint=args.num_joint,
        coord_dim=args.coord_dim,
        # input_size=input_size,
        decoder_steps=args.decoder_steps,
        model=args.model)  # TODO
    # Build graph & Train/Test
    solver = Solver()
    if args.test:
        solver.test(args=args, DataLoader=DataLoader)
    else:
        # Build net
        if args.model == "cnn":
            net = Model(
                name="resnet",
                layer_n=3,
                in_shape=[args.in_frames, args.num_joint, args.coord_dim],
                out_shape=[args.out_band],
                num_steps=args.epochs * DataLoader.Get_num_batch(
                    DataLoader.train_set['source'], args.in_frames),
                lr=args.learning_rate)
        elif args.model == "rnn":
            # Build Sequence to Sequence Model
            net = Model(seq_length=args.in_frames,
                        out_length=args.out_band,
                        rnn_size=args.rnn_size,
                        num_layers=args.num_layers,
                        batch_size=args.batch_size,
                        input_size=input_size,
                        decoder_steps=args.decoder_steps,
                        num_steps=args.epochs * DataLoader.Get_num_batch(
                            DataLoader.train_set['source'], args.in_frames),
                        lr=args.learning_rate)
        solver.train(args=args, DataLoader=DataLoader, net=net)
        solver.test(args=args, DataLoader=DataLoader)
Example #33
def main():

    global nodes_vector

    G_igraph, G_nx, nodes_vector = dp.generate_data()
    # G = igraph.Graph.Erdos_Renyi(n=200, p=0.08)
    # aca_graph = dp.separate_data_by_academy(nodes_vector, G_igraph)

    ig_list = centrality_calculation_by_igraph(G_igraph)
    nx_list = centrality_calculation_by_networkx(
        nx.DiGraph(G_igraph.get_edgelist()))

    c_list = ig_list + nx_list  # centrality calculation results
    centrality_list = copy.deepcopy(c_list)

    print centrality_list

    top_ten_list, rank_list = top_rank(centrality_list, 10)

    print rank_list
    print top_ten_list
Example #34
    def main(self):
        self.port.flushInput()  #start brand new
        #preInput = b'' #if current data is not complete, we wait until next cycle
        sendPCCount = 0
        while self.switch.is_set():
            preTime = time.time()

            if self.port.in_waiting > 0:

                rawInput = self.port.read_until(
                    size=45)  #todo make data length a variable
                #state,preInput,senStr = dp.dataSep(rawInput,self.senArray,self.senLock)
                state, senStr = dp.dataSepSimp(rawInput, self.senArray,
                                               self.senLock, self.senRecQue)
                if state:
                    # tryTime1 = time.time()
                    #self.senRecQue.put(self.senArray) #self.senArray is a multiprocess manager.Array, cannot be directly put into a list, thus we transform it into a normal list
                    # todo check the effect on speed
                    # tryTime2 = time.time()
                    # print(tryTime2-tryTime1)
                    sendPCCount = sendPCCount + 1
                    if sendPCCount == 4:
                        sendPCThread = th.Thread(target=self.sendPC,
                                                 args=(senStr, ))
                        sendPCThread.start()
                        sendPCCount = 0
                else:
                    pass
            else:
                print('!get nothing')
            aftTime = time.time()
            while (aftTime - preTime) < self.period:
                aftTime = time.time()
                time.sleep(0.00001)

        print('#Sensor ends')
Example #35
def main():
    # using Terminal to get parameters
    config = Config.Config_Table(Terminal_parser())

    # get standard train and test file
    if not os.path.exists(config.train) or not os.path.exists(config.test):
        DataProcess.get_standard_file(config)

    # get char vocab
    vocab, index, maxLen = {}, 5, -1
    vocab, index, maxLen = DataProcess.get_vocab(config.train, vocab, index,
                                                 maxLen)
    vocab, _, maxLen = DataProcess.get_vocab(config.test, vocab, index, maxLen)
    config.n_feature = len(vocab)

    # get standard train and test data
    train_data, train_target = DataProcess.get_data(config.train, vocab,
                                                    config.class_dict, maxLen)
    test_data, test_target = DataProcess.get_data(config.test, vocab,
                                                  config.class_dict, maxLen)

    # get train loader and test loader
    train_loader = Data.get_dataloader(train_data, train_target,
                                       config.batch_size)
    test_loader = Data.get_dataloader(test_data, test_target,
                                      config.batch_size)

    # define model and optimizer
    model = Model.LSTM_Linear(config, vocab).to(config.device)
    optimizer = optim.Adam(model.parameters(), lr=0.01)

    print("Train and Test Model")
    res_eval = Evaluation.Result_Eval(config.num_classes)
    # train and test model
    for epoch in range(config.epochs):
        TrainModel(model, optimizer, train_loader, epoch, config)
        TestModel(model, test_loader, config, res_eval)
    res_eval.best_model_result()
Example #36
from tensorflow.python.keras.models import load_model

import DataProcess
from CallBack import LossHistory

model = load_model('my_model_0.h5')
model.summary()
x_train, y_train, x_test, y_test, class_names = DataProcess.readData(
    path='D:\\迅雷下载\\npy', start=10001, each_count=12000)

history = LossHistory()
model.fit(x_train, y_train, batch_size=40, epochs=10, callbacks=[history])

model.save('my_model_1.h5')
score = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
history.loss_plot('epoch')
Example #37
loss_function = nn.NLLLoss()
optimizer = optim.SGD(model.parameters(), lr=0.1)

for epoch in range(10):
    running_loss = 0.0
    print("epoch : (%d)" % (epoch))
    for i, sentence in enumerate(D.training_data):
        # clear gradients for each instance
        model.zero_grad()

        # init model's hidden
        model.EMB_hidden = model.init_hidden('emb')
        model.POS_hidden = model.init_hidden('pos')

        # make our target ready
        sentence_in = D.make_index_list(sentence, 'word')
        targets = D.make_index_list(sentence, 'tag')
        char_in = D.make_index_list(sentence, 'char')

        # feed the model
        tag_scores = model(sentence_in, char_in)

        # Compute the Loss, Gradients, and update the parameters
        loss = loss_function(tag_scores, targets)
        loss.backward()
        optimizer.step()

        # Print the result
        running_loss += loss.data[0]
        if i % 200 == 0:
            print('[%d] epoch, [%5d] Steps - loss : %.3f' %
                  (epoch, i, running_loss / (i + 1)))
Example #38
def upload_file():
    '''
    Parse the uploaded file and build the graph
    '''

    global G_igraph, G_nx, nodes_vector, aca_graph

    if request.method == 'POST':

        uploaded_file = request.files['file']

        if uploaded_file and allowed_file(uploaded_file.filename):

            filename = secure_filename(uploaded_file.filename)

            # save the uploaded file on the server
            uploaded_file.save(
                os.path.join(app.config['UPLOAD_FOLDER'], filename))

            file_path = app.config['UPLOAD_FOLDER'] + uploaded_file.filename

            # first pass over the file: find the largest node ID
            with open(file_path, 'r') as f:

                max_num = 0

                graph_nodes = []

                for line in f:

                    if line and not line.strip().startswith('#'):

                        from_node_str, to_node_str = line.rsplit(None, 1)

                        from_node = map(int, from_node_str.split(','))
                        to_node = map(int, to_node_str.split(','))

                        from_node_id = from_node[0]
                        to_node_id = to_node[0]

                        # check whether the node is already in the node set
                        if from_node_id not in graph_nodes:

                            graph_nodes.append(from_node_id)

                        if to_node_id not in graph_nodes:

                            graph_nodes.append(to_node_id)

                        if from_node_id > max_num or to_node_id > max_num:

                            max_num = max(from_node_id, to_node_id)

                nodes_vector = [0] * (max_num + 1)

                G_igraph.add_vertices(graph_nodes)  # add the node set to the graph

                f.seek(0)  # rewind to the start of the file

                # second pass over the file: fill nodes_vector and build G_igraph
                for line in f:

                    if line and not line.strip().startswith('#'):

                        from_node_str, to_node_str = line.rsplit(None, 1)

                        from_node = map(int, from_node_str.split(','))
                        to_node = map(int, to_node_str.split(','))

                        from_node_id = from_node[0]
                        from_node_vector = from_node[1:]

                        to_node_id = to_node[0]
                        to_node_vector = to_node[1:]

                        # add the edge
                        G_igraph.add_edge(from_node_id, to_node_id)

                        nodes_vector[from_node_id] = from_node_vector
                        nodes_vector[to_node_id] = to_node_vector

        G_nx = nx.DiGraph(G_igraph.get_edgelist())

        aca_graph = dp.separate_data_by_academy(nodes_vector, G_igraph)

    return render_template('main.html')
Example #39
def top_students(req_id):
    '''
    Handle celebrity recommendation and academy search

    ----------
    parameter
      req_id: request ID

    return
      the data needed by the front end
    '''

    global G_igraph, G_nx, nodes_vector, aca_graph, data

    paramas = request.args  # GET request parameters

    if req_id == 0:  # render the Centrality page

        return render_template('new_centrality.html')

    elif req_id == 1:  # request centrality values for all students of the school

        ig_list = c.centrality_calculation_by_igraph(G_igraph)
        nx_list = c.centrality_calculation_by_networkx(G_nx)

        graph_json = dp.graph_json(G_nx, nodes_vector)  # graph information

        c_list = ig_list + nx_list  # centrality calculation results

        # c_list = c.test(G_nx)  # test: compute with networkx only

        # deep-copy the centrality results for the Top-N calculation
        centrality_list = copy.deepcopy(c_list)

        top_n_list, rank_list = top_rank(
            centrality_list,
            min(10, len(G_nx.nodes())))

        data = {'Centrality': c_list,
                'Graph': graph_json,
                'Rank': rank_list}  # packaged as a dict

        return make_response(json.dumps(data))

    elif req_id == 2:  # request centrality values for the students of one academy

        aca_id = int(paramas['search']) - 1  # parameter: academy ID

        ig_list = c.centrality_calculation_by_igraph(aca_graph[aca_id])
        nx_list = c.centrality_calculation_by_networkx(
            nx.DiGraph(aca_graph[aca_id].get_edgelist()))

        graph_json = dp.graph_json(
            nx.DiGraph(aca_graph[aca_id].get_edgelist()), nodes_vector)  # graph information

        c_list = ig_list + nx_list  # centrality calculation results

        # c_list = c.test(G_nx)

        centrality_list = copy.deepcopy(c_list)

        top_ten_list, rank_list = top_rank(centrality_list, 10)

        print rank_list
        print top_ten_list

        data = {'Centrality': c_list,
                'Graph': graph_json,
                'Rank': rank_list}  # packaged as a dict

        return make_response(json.dumps(data))

    elif req_id == 3:  # request centrality values for a single student node

        stu_id = int(paramas['search'])  # parameter: student ID

        ig_list = c.centrality_calculation_by_igraph(G_igraph)
        nx_list = c.centrality_calculation_by_networkx(G_nx)

        graph_json = dp.graph_json(G_nx, nodes_vector)  # graph information

        c_list = ig_list + nx_list  # centrality calculation results

        # c_list = c.test(G_nx)

        centrality_list = copy.deepcopy(c_list)

        rank_list = []

        for c_dict in centrality_list:  # build the Rank structure

            c_dict[c_dict.keys()[0]] = []

            rank_list.append(c_dict)

        stu_centrality_list = []

        # take this node's value from every centrality algorithm and store it as key-value pairs
        for centrality in c_list:

            stu_centrality = {}

            centrality_key = centrality.keys()[0]
            centrality_value = centrality.values()[0][stu_id]

            stu_centrality[centrality_key] = centrality_value

            stu_centrality_list.append(stu_centrality)

        data = {'StudentCentrality': stu_centrality_list,
                'Centrality': c_list,
                'Graph': graph_json,
                'Rank': rank_list}  # packaged as a dict

        return make_response(json.dumps(data))
Example #40
for i in total_step:
    
    target_st_time = datetime.datetime(year, 1, 1, hh, mm) + datetime.timedelta(days=st_doy-1)
    target_time = target_st_time + datetime.timedelta(minutes = run_step*i) - datetime.timedelta(minutes = time_delay)
    gim_time    = target_time - datetime.timedelta(days = gim_time_set)   
 
    target_year = target_time.year
    target_doy  = (datetime.datetime(target_year, target_time.month, target_time.day) - datetime.datetime(target_year, 1, 1)).days + 1
    target_hh = target_time.hour
    target_mm = target_time.minute

    gim_time_doy  = (datetime.datetime(gim_time.year, gim_time.month, gim_time.day) - datetime.datetime(gim_time.year, 1, 1)).days + 1 
    os.chdir(main_pwd)
 
    # create directory and copy needed file () to save_pwd
    DataProcess.preprocess(target_year, target_doy, save_pwd, download_list_fn)

    #####    get GIM data
    if stnbias_method=='GIM':
        gim = Getdata.GimData(gim_time.year, gim_time_doy)
        gim.get_gimdata()
        gim.create_mapfile()
    if download_type=='IGSRT':
        gim.read_station_bias()
        shutil.copy('bias{0}{1:03}.dat'.format(gim_time.year, gim_time_doy) ,'bias{0}{1:03}.dat'.format(target_year, target_doy))

    ####    download GPS file
    gpsfile = Getdata.GPSourceFile(target_year, target_doy, target_hh, target_mm)
    gpsfile.download_data(download_type, download_list_fn)
    gpsfile.decompress_file()
    gpsfile.d2o()
Example #41
           31: 'tree g r',
           32: 'tree e r',
           33: 'tree g b',
           34: 'tree e b',
           41: 'NB Gaussian',
           42: 'NB Multi',
           51: 'KNN uniform',
           52: 'KNN distance',
           }
# 6 define a black list for model result recordings

### import packages
## 1 import data Tdata
import numpy as np
import DataProcess as dp
Tdata = dp.trainingData()

## 3 import packagees for models
from sklearn import svm
from sklearn import linear_model
from sklearn import tree
from sklearn import naive_bayes
from sklearn import neighbors
# 4 for cross validation
from sklearn import cross_validation
from sklearn.metrics import accuracy_score

import time

# For Reference
# LabelDic = {'ang': 1, 'fru' : 2, 'sad' : 3, 'hap' : 4, 'neu' : 5, 'xxx' : 6 , 'sur' : 7, 'exc' : 8, 'dis' : 9, 'fea' : 10, 'oth': 11} # mapping of label to scalar category