Example #1
    def dataRefresh(self):
        """数据更新按钮事件,重新获取数据并处理,并更新页面显示数据"""
        getData.init()
        getData.get_data()
        dataProcess()
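        # dataProcess() is assumed to regenerate data/total.csv, which is read back below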
        # read the date of the most recent update
        with open('cache.txt') as f:
            last = f.readline().strip()
        df = pd.read_csv('data/total.csv')

        earlyData = df.iloc[-2]  # previous day's row
        lastData = df.iloc[-1]   # latest row

        # active cases = confirmed - cured - dead, summed over the two column
        # groups (assumed layout: columns 1-3 and 4-6 each hold confirmed,
        # cured, dead counts)
        lastRemain = (lastData.iloc[1] - lastData.iloc[2] - lastData.iloc[3]
                      + lastData.iloc[4] - lastData.iloc[5] - lastData.iloc[6])
        # previous day's active cases
        earlyRemain = (earlyData.iloc[1] - earlyData.iloc[2] - earlyData.iloc[3]
                       + earlyData.iloc[4] - earlyData.iloc[5] - earlyData.iloc[6])
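
        # sanity check of the arithmetic: with confirmed=100, cured=70, dead=5
        # in each column group, active cases = 2 * (100 - 70 - 5) = 50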

        # refresh the labels
        self.lastRemainLabel.config(text=f'Active cases: {lastRemain}')
        self.lastDateLabel.config(text=f'Data last updated: {last}')
        self.lastConfirmLabel.config(text=f'Confirmed: {lastData.iloc[1] + lastData.iloc[4]}')
        self.lastCuredLabel.config(text=f'Cured: {lastData.iloc[2] + lastData.iloc[5]}')
        self.lastDeadLabel.config(text=f'Deaths: {lastData.iloc[3] + lastData.iloc[6]}')

        if lastRemain - earlyRemain > 0:
            self.remainIncrease.config(text=f'Active cases up from yesterday: {lastRemain - earlyRemain}')
        else:
            self.remainIncrease.config(text=f'Active cases down from yesterday: {earlyRemain - lastRemain}')
        self.ConfirmIncrease.config(text=f'Confirmed up from yesterday: {lastData.iloc[1] + lastData.iloc[4] - earlyData.iloc[1] - earlyData.iloc[4]}')
        self.CuredIncrease.config(text=f'Cured up from yesterday: {lastData.iloc[2] + lastData.iloc[5] - earlyData.iloc[2] - earlyData.iloc[5]}')
        self.DeadIncrease.config(text=f'Deaths up from yesterday: {lastData.iloc[3] + lastData.iloc[6] - earlyData.iloc[3] - earlyData.iloc[6]}')

Example #2

import os
from datetime import datetime

from pyspark import SparkConf
from pyspark.sql import SparkSession

# import paths for the job's helper modules are assumptions based on the
# addPyFile calls below
from globalVar import glb
from dataProcessing import dataProcess

def main():
    # One spark session to join them all
    conf = SparkConf()
    conf.set('spark.executorEnv.PGHOST', os.environ['PGHOST'])
    conf.set('spark.executorEnv.PGUSER', os.environ['PGUSER'])
    conf.set('spark.executorEnv.PGPASSWORD', os.environ['PGPASSWORD'])
    spark = SparkSession.builder             \
                        .appName("timeJoin") \
                        .config(conf=conf)   \
                        .getOrCreate()

    spark.sparkContext.addPyFile("postgres.py")
    spark.sparkContext.addPyFile("globalVar.py")
    spark.sparkContext.addPyFile("getTaxiFields.py")
    spark.sparkContext.addPyFile("datetimeTools.py")
    spark.sparkContext.addPyFile("appendWeatherData.py")
    spark.sparkContext.addPyFile("dataProcessing.py")

    # Years and months of interest: n years back from the current year
    nOfYears = glb('nOfPassYears')
    currYear = datetime.now().year
    yearList = [str(cnt + currYear - nOfYears + 1) for cnt in range(nOfYears)]
    months = [str(val + 1).zfill(2) for val in range(12)]
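
    # e.g. with nOfPassYears = 3 and currYear = 2021 this yields
    # yearList == ['2019', '2020', '2021'] and months == ['01', '02', ..., '12']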

    # Create an object for every taxi data file, skipping files that don't exist
    dataObj = []
    for yr in yearList:
        for mn in months:
            obj = dataProcess(yr, mn)
            if obj.hasData():
                dataObj.append(obj)

    # Start calling methods in dataProcessing.py
    for dProp in dataObj:
        dProp.readData(spark)  # Read data
        dProp.addTimestamp()  # Convert string to timestamp
        dProp.addWthrStationID()  # Add weather station ID
        dProp.joinTables(spark)  # Main join process
        dProp.writeToPostgres('yellow')  # Write to DB with prefix 'yellow'
        #dProp.printCheck()

    spark.stop()
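
# assumed entry point (the snippet defines main() but does not show a call to it)
if __name__ == '__main__':
    main()
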
Example #3
import pandas as pd
import numpy as np

from dataProcessing import loadData, dataProcess
from plotting import TrainingPlot
from Models import createLSTMModel

train, test, submission, items, itemCategory, shops = loadData()

dataset = dataProcess(train, test)

# split into inputs and target: every column but the last is the input
# sequence, the final column is the value to predict
X = np.expand_dims(dataset.values[:, :-1], axis=2)
y = dataset.values[:, -1:]

X_test = np.expand_dims(dataset.values[:, 1:], axis=2)
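# the test input is the same window shifted one step forward (drop the first
# column instead of the last), so the model predicts the step after the data ends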

# normalised_X = dataScaling(X)
# normalised_Y = dataScaling(y)

# normalised_X = torch.FloatTensor(normalised_X).view(-1)
# normalised_Y = torch.FloatTensor(normalised_Y).view(-1)

# timestep = 30
# normalisedTrainData_timebased = createTimeSeries(normalised_X, normalised_Y, timestep)
# print(normalisedTrainData_timebased[:5])

filename = 'output/training_plot.jpg'
plot_losses = TrainingPlot()

model = createLSTMModel()
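
# a plausible next step, not shown in the snippet (assumes createLSTMModel()
# returns a compiled Keras model and TrainingPlot is a Keras callback that
# plots losses during training):
# model.fit(X, y, batch_size=4096, epochs=10, callbacks=[plot_losses])
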
Example #4

# (fragment of a word-segmentation script: `import dataProcessing` and
# `import config` are assumed, as is a Segment class defined in this file;
# the lines below are the tail of a method that follows best_left_word_dict
# backwards to recover the best segmentation path)
            key = best_left_word_dict[key][0]
        return result

    def saveResult(self, result_list, path):
        # append every result line to the output file
        with open(path, 'a') as f:
            for result in result_list:
                f.write(result)
                f.write('\n')



if __name__ == '__main__':
    # one shared dataProcess instance is enough for all the reads below
    dp = dataProcessing.dataProcess()

    # cut every test sentence and print the pieces
    sentence_list = dp.getTestData()
    for sentence in sentence_list:
        print(Segment.sentenceCut(sentence))

    # load the word dictionary and the word-pair dictionary
    _, words_dict = dp.readWordsDict(config.wordList_1998_path)
    words_pair_dict = dp.readWordsPairDict(config.wordPairList_path_1998)

    result_list = []
    # single-sentence check ("A joyous, lively atmosphere has quietly arrived")
    sentence = '欢乐热闹的气氛已悄悄降临'
    print(Segment.getChineseSegment(sentence, words_dict, words_pair_dict))

    for sentence in sentence_list:
        sentence_cut = Segment.sentenceCut(sentence)
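        # (truncated here in the source; presumably each sentence_cut is joined
        # into a segmented string, appended to result_list, and written out via
        # saveResult)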