Example 1
def series_mod(**kwargs):
    ## Chart type
    _picType = yamlDict.get('picType')

    ## Hive database and table for the query
    _hiveDB = lengthline.hiveDB or hive_conn.hiveDB
    _hiveTable = lengthline.hiveTable or hive_conn.hiveTable

    ## Branch name of the wds data
    _branchName = kwargs.get('branchName')

    ## Get the index list for this branch
    _textTagList = yamlDict.get(_branchName).get('textTagList')
    _divUnit = yamlDict.get(_branchName).get('divUnit') or 1
    _filterKeyList = [w.get('wds') for w in _textTagList]

    ## HQL query statement
    if len(_filterKeyList) == 1:
        _HQL = "SELECT wds,year,value FROM `{}`.`{}` WHERE wds='{}' ORDER BY year".format(
            _hiveDB, _hiveTable, _filterKeyList[0])
    elif len(_filterKeyList) > 1:
        _HQL = "SELECT wds,year,value FROM `{}`.`{}` WHERE wds IN {} ORDER BY year".format(
            _hiveDB, _hiveTable, tuple(_filterKeyList))

    ## Run the query
    _resultList = spark_hive_query(HQL=_HQL)

    ## Get the year list from the query result and deduplicate it without sorting
    _yearList = [x.year for x in _resultList]
    _yearList = list_uniq(_yearList)

    ## Build the series list
    _seriesList = []
    for _x in _textTagList:
        _perList = [
            per_list(_y, _x.get('wds'), _resultList, _divUnit)
            for _y in _yearList
        ]
        _seriesList.append({
            'name': _x.get('name'),
            'type': _picType,
            'data': _perList
        })

    ## Return the data
    return {'year': _yearList, 'series': _seriesList}
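
The helpers spark_hive_query, list_uniq, and per_list are imported from a shared functions script that is not shown in these examples. Judging only from how they are called above (order-preserving deduplication, and a per-year value lookup scaled by divUnit), the two list helpers might look roughly like the sketch below; the bodies are assumptions, not the project's actual implementation.

def list_uniq(items):
    ## Assumed helper: deduplicate a list while preserving the original order.
    seen = set()
    return [x for x in items if not (x in seen or seen.add(x))]

def per_list(year, wds, resultList, divUnit):
    ## Assumed helper: return the value for the given year/wds pair,
    ## scaled by divUnit; fall back to 0 when no matching row exists.
    for row in resultList:
        if row.year == year and row.wds == wds:
            return round(float(row.value) / divUnit, 2)
    return 0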
Example 2
def main():
    ## Get the custom chart settings from the yaml data
    textTagList = yamlDict.get('textTagList')
    colorList = yamlDict.get('colorList')
    yUnit = yamlDict.get('yUnit')
    divUnit = yamlDict.get('divUnit')
    picType = yamlDict.get('picType')

    ## Get the Hive connection info
    hiveDB = portTEU.hiveDB or hive_conn.hiveDB
    hiveTable = portTEU.hiveTable or hive_conn.hiveTable

    ## Data list
    dataList = []
    for w in textTagList:
        ## wds filter key for the query
        filterKeys = w.get('code')
        ## Query statement
        HQL = "SELECT year,value FROM {db}.{tb} WHERE wds='{fk}' \
             AND year=(SELECT MAX(year) FROM {db}.{tb} WHERE wds='{fk}' AND value!=0)".format(
            db=hiveDB, tb=hiveTable, fk=filterKeys)
        ## Run the Hive query
        result = spark_hive_query(HQL=HQL)
        ## Convert the query result by the unit divisor
        totalValue = round(result[0][1] / divUnit, 2)
        ## Append the converted result to the data list
        dataList.append({'name': w.get('name'), 'value': totalValue})

    ## Key used for sorting
    sortKey = 'value'
    ## Extract the chart list
    drawList = semicircle(dataList, sortKey)

    ## Label list
    labelList = [x.get('name') for x in drawList]

    ## Build the output dict
    outputDict = {'color': colorList, 'label': labelList, 'data': drawList}

    ## Dump to a JSON string without escaping non-ASCII characters
    outputStr = json.dumps(outputDict, ensure_ascii=False)

    ## Store the result in the MySQL database
    mysql_update(dataLabel=portTEU.label, dataValues=outputStr)
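
For reference, the query template used in the loop above renders to an HQL statement of the following shape (demo_db, demo_table, and the wds code A01 are placeholders, not values from the project). The nested MAX(year) subquery restricts the result to the most recent year whose value is non-zero, which is why result[0] is the single latest row.

SELECT year,value FROM demo_db.demo_table WHERE wds='A01'
    AND year=(SELECT MAX(year) FROM demo_db.demo_table WHERE wds='A01' AND value!=0)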
Example 3
def series_mod(**kwargs):
    ## Get the branch name from the keyword arguments
    _branchName = kwargs.get('branchName')

    ## Get the data for this branch from the yaml data
    _textTagList = yamlDict.get(_branchName).get('textTagList')
    _divUnit = yamlDict.get(_branchName).get('divUnit') or 1
    _filterKeyList = [w.get('wds') for w in _textTagList]

    ## HQL query statement
    if len(_filterKeyList) == 1:
        _HQL = "SELECT wds,year,value FROM `{}`.`{}` WHERE wds='{}' ORDER BY year".format(
            hiveDB, hiveTable, _filterKeyList[0])
    elif len(_filterKeyList) > 1:
        _HQL = "SELECT wds,year,value FROM `{}`.`{}` WHERE wds IN {} ORDER BY year".format(
            hiveDB, hiveTable, tuple(_filterKeyList))

    ## Run the query
    _resultList = spark_hive_query(HQL=_HQL)

    ## Get the year list and deduplicate it
    _yearList = [x.year for x in _resultList]
    _yearList = list_uniq(_yearList)

    ## Build the series list
    _seriesList = []
    for x in _textTagList:
        _perList = [
            per_list(y, x.get('wds'), _resultList, _divUnit) for y in _yearList
        ]
        _seriesList.append({
            'name': x.get('name'),
            'type': picType,
            'data': _perList
        })

    ## Return the year list and the series list
    return {'year': _yearList, 'series': _seriesList}
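
series_mod only assembles the year and series lists; persisting them is left to a caller. Below is a minimal usage sketch under assumptions: the branch name 'passenger', the output keys, and the label passed to mysql_update are hypothetical placeholders that mirror the main() pattern in the other examples.

def main():
    ## 'passenger' is a hypothetical branch name; real branch names come
    ## from the yaml configuration.
    branchData = series_mod(branchName='passenger')

    ## Assemble a chart payload from the returned year and series lists.
    outputDict = {
        'xlabel': branchData.get('year'),
        'series': branchData.get('series')
    }

    ## Dump to a JSON string without escaping non-ASCII characters and
    ## store it under a hypothetical label, as the other examples do.
    outputStr = json.dumps(outputDict, ensure_ascii=False)
    mysql_update(dataLabel='passenger_series', dataValues=outputStr)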
Example 4
def main():
    ## Get the yaml data
    textTagList = yamlDict.get('textTagList')
    colorMin = yamlDict.get('colorMin')
    colorMax = yamlDict.get('colorMax')
    titleName = yamlDict.get('titleName')
    subName = yamlDict.get('subName')
    subLink = yamlDict.get('subLink')
    yUnit = yamlDict.get('yUnit')
    divUnit = yamlDict.get('divUnit')
    picType = yamlDict.get('picType')

    ## Get the Hive connection info
    hiveDB = railwayfreight.hiveDB or hive_conn.hiveDB
    hiveTable = railwayfreight.hiveTable or hive_conn.hiveTable

    ## Keyword list
    indexList = [w.get('name') for w in textTagList]

    ## Coordinate list
    axisList = [jsonDict.get(x) for x in indexList]

    ## Coordinate output dict
    axisDict = dict(zip(indexList, axisList))

    ## Data list
    dataList = []
    yearList = []
    for w in textTagList:
        filterKeys = w.get('code')
        HQL = "SELECT year,value FROM {db}.{tb} WHERE wds='{fk}' \
             AND year=(SELECT MAX(year) FROM {db}.{tb} WHERE wds='{fk}' AND value!=0)".format(
            db=hiveDB, tb=hiveTable, fk=filterKeys)
        result = spark_hive_query(HQL=HQL)
        totalValue = round(result[0][1] / divUnit, 2)
        yearList.append(result[0][0])
        dataList.append({'name': w.get('name'), 'value': totalValue})

    ## Deduplicate the year list
    yearList = list(set(yearList))

    ## Adjust the title name
    if len(yearList) == 1:
        titleName = str(
            max(yearList)) + '年' + titleName + '(单位: ' + yUnit + ')'
    else:
        titleName = str(min(yearList)) + '-' + str(
            max(yearList)) + '年' + titleName + '(单位: ' + yUnit + ')'

    ## Take the max and min values
    _max = max(x.get('value') for x in dataList)
    _min = min(x.get('value') for x in dataList)

    ## Build the output dict
    outputDict = {
        'name': titleName,
        'subname': subName,
        'sublink': subLink,
        'colorMax': colorMax,
        'colorMin': colorMin,
        'max': _max,
        'min': _min,
        'axis': axisDict,
        'data': dataList
    }

    ## Dump to a JSON string without escaping non-ASCII characters
    outputStr = json.dumps(outputDict, ensure_ascii=False)

    ## Store the result in the MySQL database
    mysql_update(dataLabel=railwayfreight.label, dataValues=outputStr)
Example 5
def main():
    ## Define the related labels and the output format
    textTagList = yamlDict.get('textTagList')
    legendList = [x.get('name') for x in textTagList]
    colorList = yamlDict.get('colorList')
    titleName = yamlDict.get('titleName')
    yUnit = yamlDict.get('yUnit')
    divUnit = yamlDict.get('divUnit') or 1
    picType = yamlDict.get('picType')

    ## Define the database name, table name, and query statement
    ## Note: the test environment performs poorly, so to save time the total transport passenger figures are queried directly here.
    hiveDB = lengthtraveller.hiveDB or hive_conn.hiveDB
    hiveTable = lengthtraveller.hiveTable or hive_conn.hiveTable

    ## Collect the wds values into a list
    keyList = [e.get('wds') for e in textTagList]
    ## Convert the wds list to a tuple for the HQL query
    keyTuple = tuple(keyList)

    ## HQL query statement
    if len(keyTuple) == 1:
        HQL = "SELECT wds,year,value FROM `{}`.`{}` WHERE wds='{}'".format(
            hiveDB, hiveTable, keyTuple[0])
    elif len(keyTuple) > 1:
        HQL = "SELECT wds,year,value FROM `{}`.`{}` WHERE wds IN {}".format(
            hiveDB, hiveTable, keyTuple)

    ## Get the query result as a list; see the spark_hive_query module in the functions script for details
    resultList = spark_hive_query(HQL=HQL)

    ## Get the year list
    yearList = [y.year for y in resultList]

    ## Deduplicate the year list
    yearList = list_uniq(yearList)

    ## Sort the year list
    yearList.sort()

    ## Define the series list
    seriesList = []
    for x in textTagList:
        ## Get the data list for the dfwds.code
        perList = [
            per_list(y, x.get('wds'), resultList, divUnit) for y in yearList
        ]
        ## Append the series entry for this tag
        seriesList.append({
            'name': x.get('name'),
            'type': picType,
            'data': perList
        })

    ## Define the output format (dict)
    outputDict = {
        'title': titleName,
        'legend': legendList,
        'color': colorList,
        'xlabel': yearList,
        'yname': yUnit,
        'series': seriesList
    }
    ## Dump to a JSON string without escaping non-ASCII characters
    outputStr = json.dumps(outputDict, ensure_ascii=False)

    ## Store the result in the MySQL database
    mysql_update(dataLabel=lengthtraveller.label, dataValues=outputStr)
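
spark_hive_query itself lives in the separate functions script mentioned in the comment above. Given the attribute-style access on its rows (y.year, x.wds) alongside index access (result[0][1]), it presumably runs the HQL through a SparkSession with Hive support and returns the collected Row objects; the following is a sketch under that assumption, with a placeholder application name.

from pyspark.sql import SparkSession

def spark_hive_query(HQL):
    ## Assumed implementation: run the HQL against Hive via Spark and
    ## return the result as a list of pyspark Row objects, which support
    ## both attribute access (row.year) and index access (row[1]).
    spark = (SparkSession.builder
             .appName('yaml_chart_query')  ## placeholder app name
             .enableHiveSupport()
             .getOrCreate())
    return spark.sql(HQL).collect()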
Example 6
def main():
    ## Read the yaml data
    textTagList = yamlDict.get('textTagList')
    colorList = yamlDict.get('colorList')
    titleName = yamlDict.get('titleName')
    yUnit = yamlDict.get('yUnit')
    divUnit = yamlDict.get('divUnit') or 1

    ## Get the Hive connection info
    hiveDB = traffic.hiveDB or hive_conn.hiveDB
    hiveTable = traffic.hiveTable or hive_conn.hiveTable

    ## Define the output data list
    dataList = []
    for t in textTagList:
        ## Data list for the totals
        masterList = []
        totalValue = 0
        for w in t.get('total'):
            filterKeys = w.get('wds')
            ## Hive query statement
            HQL = "SELECT year,value FROM {db}.{tb} WHERE wds='{fk}' \
                AND year=(SELECT MAX(year) FROM {db}.{tb} WHERE wds='{fk}' AND value!=0)".format(
                db=hiveDB, tb=hiveTable, fk=filterKeys)
            result = spark_hive_query(HQL=HQL)
            totalValue += round(float(result[0][1]) / divUnit, 2)
            ## Get the data year
            year = str(result[0][0]) + '年'

            ## Append to the totals data list
            masterList.append({'name': w.get('name'), 'value': totalValue})

        ## Other fields for the totals entry
        name = year + t.get('name') + yUnit
        radius = t.get('master').get('radius')
        center = t.get('master').get('center')

        ## Totals entry
        dataList.append({
            'name': name,
            'radius': radius,
            'center': center,
            'data': masterList
        })

        ## List of specific items
        branchList = []
        ## Remaining value
        remainValue = totalValue
        for w in t.get('specific'):
            filterKeys = w.get('wds')
            HQL = "SELECT year,value FROM {db}.{tb} WHERE wds='{fk}' \
                AND year=(SELECT MAX(year) FROM {db}.{tb} WHERE wds='{fk}' AND value!=0)".format(
                db=hiveDB, tb=hiveTable, fk=filterKeys)
            specValue = round(
                float(spark_hive_query(HQL=HQL)[0][1]) / divUnit, 2)
            remainValue -= specValue

            branchList.append({'name': w.get('name'), 'value': specValue})

        ## Correct errors caused by rounding
        if remainValue <= 0:
            remainValue = 0

        branchList.append({'name': '其他', 'value': round(remainValue, 2)})

        name = year + t.get('name') + yUnit
        radius = t.get('branch').get('radius')
        center = t.get('branch').get('center')

        dataList.append({
            'name': name,
            'radius': radius,
            'center': center,
            'data': branchList
        })

    outputDict = {
        'name': titleName,
        'label': legend(),
        'color': colorList,
        'data': dataList
    }

    ## Dump to a JSON string without escaping non-ASCII characters
    outputStr = json.dumps(outputDict, ensure_ascii=False)

    ## Store the result in the MySQL database
    mysql_update(dataLabel=traffic.label, dataValues=outputStr)
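
mysql_update also comes from the shared functions script and is only ever called with a dataLabel and a JSON string. Below is a sketch of what a minimal implementation might look like with pymysql; the connection settings, table name, and column names are placeholders invented for illustration, not the project's actual schema.

import pymysql

def mysql_update(dataLabel, dataValues):
    ## Assumed implementation: upsert the JSON payload keyed by its label.
    ## Host, credentials, the chart_data table and its columns are placeholders.
    conn = pymysql.connect(host='localhost', user='chart_user', password='***',
                           database='chart_db', charset='utf8mb4')
    try:
        with conn.cursor() as cur:
            cur.execute(
                "INSERT INTO chart_data (data_label, data_values) "
                "VALUES (%s, %s) "
                "ON DUPLICATE KEY UPDATE data_values = VALUES(data_values)",
                (dataLabel, dataValues))
        conn.commit()
    finally:
        conn.close()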