Example #1
import DataProcess as dp  # assumed: the snippet uses dp.DataProcess() below


def app_principal():
    """ This is the main method of application.
     It start this app and control it
    """
    start = dp.DataProcess()
    t_a, h_a, h_s = start.data_in()
    start.define_disease_table_data(t_a, h_a, h_s)
Example #2
    def setDfMonthData(self, startDate=None, endDate=None):
        dpMonthObj = dp.DataProcess(self.code, self.name, period=dp.MONTH)
        dpMonthObj.readData()
        self.dpMonthObj = dpMonthObj
        if os.path.exists(dpMonthObj.dataGenCsvFile):
            self.dfMonthGenData = pd.read_csv(dpMonthObj.dataGenCsvFile,
                                              encoding=sd.UTF_8,
                                              dtype={'code': str})
            self.dfMonthFilterData = self.filterData(self.dfMonthGenData,
                                                     startDate, endDate)
        else:
            print(
                '[Function:%s line:%s stock:%s] Error: file %s does not exist' %
                (self.setDfMonthData.__name__, sys._getframe().f_lineno,
                 self.code, dpMonthObj.dataGenCsvFile))
            sys.exit()

        if os.path.exists(dpMonthObj.signalReportFile):
            self.dfMonthSignalData = pd.read_csv(dpMonthObj.signalReportFile,
                                                 encoding=sd.UTF_8,
                                                 dtype={'code': str})
        else:
            print(
                '[Function:%s line:%s stock:%s] Error: file %s does not exist' %
                (self.setDfMonthData.__name__, sys._getframe().f_lineno,
                 self.code, dpMonthObj.signalReportFile))
            sys.exit()

        self.reportPath = dpMonthObj.dataPath + self.code + '_exchange_report.csv'
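The two read-or-exit branches above are identical apart from the file involved; a compact refactor could move them into one helper (a sketch only, with a hypothetical method name, reusing the os/pd/sd/sys imports the class already depends on):

    def _readCsvOrExit(self, csvFile):
        # Hypothetical helper: read a CSV or abort with the same error style
        if not os.path.exists(csvFile):
            print('[Function:%s stock:%s] Error: file %s does not exist' %
                  (self.setDfMonthData.__name__, self.code, csvFile))
            sys.exit()
        return pd.read_csv(csvFile, encoding=sd.UTF_8, dtype={'code': str})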
Example #3
    def handleCloudMongoData(self, uri, hostPort, file_data, idCollec):

        # socketKeepAlive is deprecated (and removed in PyMongo 4); drop it there
        client = pym.MongoClient(uri, hostPort, connectTimeoutMS=30000,
                                 socketTimeoutMS=None, socketKeepAlive=True)

        db = client.get_database()

        print" \n DB structure:  ", db
        print" \n DB name:  ", db.name
        print" \n Collection client: ", db.client

        if idCollec == 1:        # dadosNumSensores2
            dadosNumSensores2 = db['dadosNumSensores2']
            try:
                # insert_one replaces the deprecated Collection.insert (removed
                # in PyMongo 4); use insert_many if file_data is a list of docs
                dadosNumSensores2.insert_one(file_data)
            except pym.errors.PyMongoError:
                print("\n\n Error inserting data into collection dadosNumSensores2")
                self.closeCMConection()
                exit(0)
        elif idCollec == 2:      # dadosVerificSensores2
            dadosVerificSensores2 = db['dadosVerificSensores2']
            try:
                dadosVerificSensores2.insert_one(file_data)
            except pym.errors.PyMongoError:
                print("\n\n Error inserting data into collection dadosVerificSensores2")
                self.closeCMConection()
                exit(0)
        elif idCollec == 3:      # controle2
            controle2 = db['controle2']
            try:
                controle2.insert_one(file_data)
            except pym.errors.PyMongoError:
                print("\n\n Error inserting data into collection controle2")
                self.closeCMConection()
                exit(0)
        else:   # retrieve the response time: idCollec == 4
                # docum = db.get_collection('controle').find({'tempo': {'$gte': 5}})
            try:
                # find_one() returns a plain dict; pretty() exists only in the
                # mongo shell, not in PyMongo
                docum = db.get_collection('controle').find_one()
                tempoUser = docum.get('tempo')  # read the value of the tempo field
                dpo = Dp.DataProcess()
                dpo.tempoAtual = dpo.converterTempo(tempoUser)
            except pym.errors.PyMongoError:
                print("\n\n Error selecting documents from collection controle")
                self.closeCMConection()
                exit(0)
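In current PyMongo, the shell-style query commented out above is written with a plain dict filter; a minimal sketch (assuming the same db handle, controle collection, and tempo field):

# Documents whose 'tempo' field is >= 5; shell {$gte: 5} becomes a Python dict
for doc in db.get_collection('controle').find({'tempo': {'$gte': 5}}):
    print(doc.get('tempo'))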
Example #4
def main():
    # Input size of each steps
    input_size = args.num_joint * args.coord_dim

    # Loading data
    DataLoader = DataProcess.DataProcess(
        path=args.data_path,
        batch_size=args.batch_size,
        num_joint=args.num_joint,
        coord_dim=args.coord_dim,
        # input_size=input_size,
        decoder_steps=args.decoder_steps,
        model=args.model)  # TODO
    # Build graph & Train/Test
    solver = Solver()
    if args.test:
        solver.test(args=args, DataLoader=DataLoader)
    else:
        # Build net
        if args.model == "cnn":
            net = Model(
                name="resnet",
                layer_n=3,
                in_shape=[args.in_frames, args.num_joint, args.coord_dim],
                out_shape=[args.out_band],
                num_steps=args.epochs * DataLoader.Get_num_batch(
                    DataLoader.train_set['source'], args.in_frames),
                lr=args.learning_rate)
        elif args.model == "rnn":
            # Build Sequence to Sequence Model
            net = Model(seq_length=args.in_frames,
                        out_length=args.out_band,
                        rnn_size=args.rnn_size,
                        num_layers=args.num_layers,
                        batch_size=args.batch_size,
                        input_size=input_size,
                        decoder_steps=args.decoder_steps,
                        num_steps=args.epochs * DataLoader.Get_num_batch(
                            DataLoader.train_set['source'], args.in_frames),
                        lr=args.learning_rate)
        solver.train(args=args, DataLoader=DataLoader, net=net)
        solver.test(args=args, DataLoader=DataLoader)
Example #5
    def __init__(self):
        # Instantiate the class that reads the program's input files,
        # such as the stop-word lists and the tags
        inputFiles = ReadInputFiles.ReadInputFiles()

        # Instance of the GenerateID class, which generates the IDs for the
        # records to be inserted into the database
        generateID = GenerateID.GenerateID()

        # Build all the dictionaries with their values.
        inputFiles.mountDicts()

        writeLogFile = WriteLogFile.WriteLogFile()

        # Create an instance of the BolsaDAO class, which runs the database queries
        bolsa = BolsaDAO.BolsaDAO(writeLogFile)

        pool_sema = threading.BoundedSemaphore(value=1)

        self.generateID = generateID  # reuse the instance created above

        dicionario = Dict.Dict(bolsa, pool_sema, writeLogFile)

        numberOfThread = 4

        # Get the number of rows in the database table
        rowCount = bolsa.getTableRowCount()

        areaDeConhecimento = self.montaHashAreaCohecimento(bolsa)

        # Number of records fetched from the database per batch.
        offset = 5000
        # Number of passes needed to run the algorithm over the whole table.
        cycles = math.ceil(rowCount / offset)

        i = inputFiles.readOnFileCycle()

        inputFiles.readOnFileHashWord(dicionario.wordDict)
        inputFiles.readOnFileHashStem(dicionario.stemDict)

        for i in range(0, cycles):
            # fetch the next batch of 'offset' records from the database
            rows = bolsa.getBolsaOffsetLimit(offset, offset * i)
            threads = []
            k = 0
            while k < len(rows):
                for l in range(0, numberOfThread):
                    if k < len(rows):
                        thread = DataProcess.DataProcess(
                            inputFiles.stopWordBannedList,
                            inputFiles.stopWordConnectList,
                            inputFiles.tagTrigram, inputFiles.tagBigram, bolsa,
                            rows[k], pool_sema, self.generateID,
                            areaDeConhecimento, dicionario)
                        threads.append(thread)
                        thread.start()
                        k = k + 1
                for l in range(0, len(threads)):
                    threads[l].join()
                del threads[:]
            print("..::Já foram processadas ->" + str(offset * (i + 1)))
            bolsa.mysql.commit()
            if i > 0 and i % 10 == 0:
                dicionario.saveStemAndWordDict(i)
        # close the log files that hold the insertion errors for each table
        bolsa.fileSource.close()
        bolsa.fileWord.close()
        bolsa.fileStem.close()
        dicionario.saveStemAndWordDict(i)
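Because pool_sema is a BoundedSemaphore with value=1, the DataProcess threads that acquire it are effectively serialized around the shared database handle; a minimal sketch of that pattern (hypothetical names):

def insert_serialized(sema, dao, record):
    with sema:              # only one thread can hold the semaphore at a time
        dao.insert(record)  # so shared DB access runs one thread at a time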
Example #6
import glob
import os
import shutil
import numpy as np
import subprocess
import easygui
import tkinter as tk
from tkinter import messagebox

# local
import DataProcess as dp
dp = dp.DataProcess()  # note: rebinds the module alias to a DataProcess instance


def main():
    dirPath = r"C:\Users\紅林亮平\Desktop\【Input to excel】"
    fileCreater(dirPath)


def openExplorer(path):
    FILEBROWSER_PATH = os.path.join(os.getenv('WINDIR'), 'Explorer.exe')
    subprocess.run([FILEBROWSER_PATH, '/select,', os.path.normpath(path)])


def getDirPath():
    return easygui.diropenbox(title="変換するフォルダを選択")  # "Select the folder to convert"


# def forDemoRmOutputDir():

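openExplorer relies on Explorer's /select,<path> switch, which opens a window with the given file or folder pre-selected rather than opening it; the comma is part of the switch itself. A hypothetical call:

openExplorer(r"C:\Users\me\Desktop\report.xlsx")  # Explorer opens with the file highlighted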
Example #7
    # h is the grid spacing (defined earlier in the truncated original).
    # Five-point stencil at x = -2h, -h, 0, h, 2h; row k holds the k-th powers
    A = np.array([[      1,     1,      1,     1,       1],
                  [   -2*h,    -h,      0,     h,     2*h],
                  [ 4*h**2,  h**2,      0,  h**2,  4*h**2],
                  [-8*h**3, -h**3,      0,  h**3,  8*h**3],
                  [16*h**4,  h**4,      0,  h**4, 16*h**4]])
    if args.degree == 1:
        # First Derivative
        B = np.array([0, 1, 0, 0, 0])
    else:
        # Second Derivative
        B = np.array([0, 0, 2, 0, 0])
        
    X = np.linalg.solve(A, B)


    # Loading data
    DataLoader = DataProcess.DataProcess(path=args.data_path, 
                                         batch_size=args.batch_size, 
                                         num_joint=args.num_joint,
                                         coord_dim=args.coord_dim,
                                         decoder_steps=args.decoder_steps) # TODO
    
    print(DataLoader.valid_set['source']['squat_front'].shape)
    data = DataLoader.valid_set['source']['squat_front'][:,0,1]
    result = np.zeros((len(data)-4,), np.float32)
    print(data.shape, result.shape)

    for i in range(len(result)):
        mini_data = data[i:i+5]
        # Curve fitting
        # A.T * A * X = A.T * B
        # CA = np.array([[1, -2, (-2)**2, (-2)**3],
                       # [1, -1, (-1)**2, (-1)**3],
                       # [1,  0, ( 0)**2, ( 0)**3],
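The linear system solved above is the standard derivation of five-point central-difference weights: row k of A holds the k-th powers of the stencil offsets, and B selects the derivative order. A quick standalone check (assuming some step size h; the first-derivative weights should come out as (1, -8, 0, 8, -1)/(12h)):

import numpy as np

h = 0.1
A = np.array([[      1,     1,  1,    1,       1],
              [   -2*h,    -h,  0,    h,     2*h],
              [ 4*h**2,  h**2,  0, h**2,  4*h**2],
              [-8*h**3, -h**3,  0, h**3,  8*h**3],
              [16*h**4,  h**4,  0, h**4, 16*h**4]])
B = np.array([0, 1, 0, 0, 0])  # pick out the first derivative
X = np.linalg.solve(A, B)
print(np.allclose(X * 12 * h, [1, -8, 0, 8, -1]))  # True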
Example #8
from DataProcess import *
from XGBoostModel import *
from FeatureEngineering import *
from TimeCost import *

if __name__ == '__main__':

    # toggle: leave exactly one mode active
    # mode = Const.VALID_MODE
    mode = Const.PREDICT_MODE
    tc = TimeCost()

    dp = DataProcess(mode)
    df, dft = dp.data_input(Const.TRAIN_FILE_NAME, Const.TEST_FILE_NAME)
    tc.print_event()

    fe = FeatureEngineering(mode)
    df, dft = fe.feature_process(df, dft)
    tc.print_event()

    X_train, y_train, X_valid, y_valid, X_test = dp.get_split_data(df, dft)
    tc.print_event()

    xgb = XGBoostModel(mode)
    result = xgb.train_model(X_train, y_train, X_valid, y_valid, X_test)
    tc.print_event()

    if not mode:
        dp.transform_index(result)

    # xgb.load_model()
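For the "if not mode:" branch above to run only after prediction, Const.PREDICT_MODE has to be falsy; a plausible definition (an assumption, since Const is not shown in this example):

class Const:
    VALID_MODE = 1    # assumed truthy: validation run, skip transform_index
    PREDICT_MODE = 0  # assumed falsy: prediction run, write transformed result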
Example #9
                s=5,
                color=(0, 1, 0),
                label='Recall')
    plt.scatter(EPOCHS,
                F_1_t_n,
                marker='o',
                s=5,
                color=(0, 0, 1),
                label='F1-Measure')
    plt.legend()
    plt.show()


if __name__ == '__main__':
    # Process the training, validation, and test set files separately
    DataProcess.DataProcess(train, train_out)
    DataProcess.DataProcess(validation, validation_out)
    DataProcess.DataProcess(test, test_out)
    # Condense the raw word-vector file
    VecPre.VecPre(wordvector, wordvector_out)
    # Read the training, validation, and test sets
    train_set = open(train_out, encoding='utf-8').readlines()
    validation_set = open(validation_out, encoding='utf-8').readlines()
    test_set = open(test_out, encoding='utf-8').readlines()
    # Read the word vectors
    word_vector = open(wordvector_out, encoding='utf-8').readlines()
    # Build the index for the word vectors
    vector_to_idx = {}
    for i in range(len(word_vector)):
        vector_to_idx[word_vector[i].split()[0]] = len(vector_to_idx)
    # Build the word index
Example #10
        f.write('\n'.join(sys.argv[1:]))
    # save information
    now = datetime.datetime.now()
    current_time = '{:04d}_{:02d}_{:02d}_{:02d}{:02d}{:02d}\n'.format(
        now.year, now.month, now.day, now.hour, now.minute, now.second)
    with open('./model/info.txt', 'w') as out_file:
        out_file.write(current_time)
        out_file.write(args.info)

if __name__ == '__main__':
    # Input size of each steps
    input_size = args.num_joint * args.coord_dim

    # Loading data
    DataLoader = DataProcess.DataProcess(path=args.data_path,
                                         batch_size=args.batch_size,
                                         input_size=input_size,
                                         decoder_steps=args.decoder_steps)
    if not args.test:
        train_batch_generator = DataLoader.Batch_Generator(
            DataLoader.train_set['source'], DataLoader.train_set['target'],
            args.in_frames, args.out_band)
        (_, _, num_batch) = next(train_batch_generator)

        # Define graph
        train_graph = tf.Graph()
        with train_graph.as_default():

            # Get placeholder
            (input_data, targets, keep_rate) = SegmentModel.get_inputs()

            # Build Sequence to Sequence Model