Example #1
async def get_estimated_value_by_id(
        conn: Database,
        asset_id: int,
        data_id: int,
        session: Session = session_make(engine=None),
):
    hi_model = AssetHI.model(point_id=asset_id)
    query = session.query(hi_model.id,
                          hi_model.est).filter(hi_model.id == data_id)
    res = await conn.fetch_one(query2sql(query))
    dic = {"id": res["id"], "est": json.loads(res["est"])}
    return dic
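
A minimal usage sketch for the helper above: the SQLAlchemy Session (bound to no engine) only builds the query, query2sql renders it to SQL, and the `databases` connection executes it. The entry point and the sample IDs below are assumptions for illustration.

import asyncio

from databases import Database

from db.conn_engine import META_URL


async def main():
    async with Database(META_URL) as conn:
        # Fetch the estimated-value record for asset 1 / data row 42 (sample IDs).
        record = await get_estimated_value_by_id(conn=conn, asset_id=1, data_id=42)
        print(record["id"], record["est"])


asyncio.run(main())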
Example #2
async def read_asset_info(id: int, conn: Database = Depends(get_db)):
    """
    Get Asset Info by ID.
    """
    session = session_make(meta_engine)
    info = await get_info(session=session, conn=conn, id=id)
    if not info:
        raise HTTPException(
            status_code=400,
            detail="Item not found. / Asset Information have not been record.",
        )
    return dict(info)
Example #3
async def get_mp_mapper():
    if len(measure_point_router) == 0:
        async with Database(META_URL) as conn:
            session = session_make(engine=None)
            query = session.query(MeasurePoint)
            res = await conn.fetch_all(query2sql(query))
            for row in res:
                measure_point_router[row["id"]] = {
                    "sid": row["station_id"],
                    "iid": row["inner_station_id"],
                    "type": row["type"],
                }
    return measure_point_router
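
get_mp_mapper lazily fills the module-level measure_point_router dict on first use, so later calls skip the database round trip. A minimal sketch of that behavior, assuming an asyncio entry point:

import asyncio


async def main():
    # First call queries MeasurePoint and caches the id -> station mapping.
    mapper = await get_mp_mapper()
    # Second call returns the same cached dict without touching the database.
    assert (await get_mp_mapper()) is mapper


asyncio.run(main())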
Example #4
async def get_similarity_threshold_recently(
        conn: Database,
        asset_id: int,
        limit: int,
        session: Session = session_make(engine=None),
):
    hi_model = AssetHI.model(point_id=asset_id)
    query = (session.query(hi_model.id, hi_model.time, hi_model.similarity,
                           hi_model.threshold).order_by(
                               hi_model.time.desc()).limit(limit))
    res = await conn.fetch_all(query2sql(query))
    res.reverse()
    dic = multi_result_to_array(res)
    return dic
Example #5
async def get_similarity_threshold_during_time(
        conn: Database,
        asset_id: int,
        time_before: str,
        time_after: str,
        session: Session = session_make(engine=None),
):
    hi_model = AssetHI.model(point_id=asset_id)
    query = session.query(hi_model.id, hi_model.time, hi_model.similarity,
                          hi_model.threshold).filter(
                              hi_model.time.between(str(time_after),
                                                    str(time_before)))

    res = await conn.fetch_all(query2sql(query))
    dic = multi_result_to_array(res)
    return dic
Example #6
async def get_estimated_value_multi(
        conn: Database,
        asset_id: int,
        time_before: str,
        time_after: str,
        session: Session = session_make(engine=None),
):
    hi_model = AssetHI.model(point_id=asset_id)
    query = session.query(hi_model.id, hi_model.time, hi_model.est).filter(
        hi_model.time.between(str(time_after), str(time_before)))
    res = await conn.fetch_all(query2sql(query))

    dic = {}
    for row in res:
        dic.setdefault("id", []).append(row["id"])
        dic.setdefault("time", []).append(str(row["time"]))

        serialized = json.loads(row["est"])
        # Each label expands into two columns, suffixed "-原始值" (raw value)
        # and "-估计值" (estimated value).
        for index, field in enumerate(serialized["label"]):
            dic.setdefault(field + "-原始值",
                           []).append(serialized["raw"][index])
            dic.setdefault(field + "-估计值",
                           []).append(serialized["est"][index])
    return dic
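
The returned dict is column-oriented: one list per key, with every entry of est["label"] expanded into a raw-value series ("-原始值") and an estimated-value series ("-估计值"). A hypothetical result shape for two rows with a single label "bearing_temp" (all values invented for illustration):

expected_shape = {
    "id": [101, 102],
    "time": ["2021-01-01 00:00:00", "2021-01-01 00:10:00"],
    "bearing_temp-原始值": [41.2, 41.5],  # raw values
    "bearing_temp-估计值": [40.9, 41.1],  # estimated values
}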
Example #7
def mset_evaluate(cycle_number):
    estimate_count = 0
    session = session_make(engine=meta_engine)
    pumps = fetch_pumps(session)
    for pump in pumps:

        asset_hi_model = AssetHI.model(point_id=pump.asset_id)
        mps = fetch_mps(session=session, asset_id=pump.asset_id)

        if len(mps) > 0:
            base_data_list = fetch_base_data(
                session=session,
                cycle_number=cycle_number,
                base_mp=mps[0],
                asset_id=pump.asset_id,
            )
            if len(base_data_list) == cycle_number:
                feature_matrix = fetch_feature_matrix(
                    session=session, base_data_list=base_data_list, mps=mps)
                sim, thres, Kest, warning_index = evaluate(
                    path=pump.mset_model_path, feature_matrix=feature_matrix)

                evaluate_res_insert_value = []

                for i in range(len(base_data_list)):

                    evaluate_res_insert_value.append(
                        asset_hi_model(
                            health_indicator=float(sim[i][0] * 100),
                            similarity=float(sim[i][0]),
                            threshold=float(thres[i][0]),
                            time=base_data_list[i]["time"],
                            data_id=base_data_list[i]["id"],
                            est={
                                "label": [mp.name for mp in mps],
                                "raw": feature_matrix[i].tolist(),
                                "est": Kest[i].tolist(),
                            },
                        ))
                try:
                    for index, row in enumerate(evaluate_res_insert_value):

                        session.add(row)
                        session.commit()
                        if len(warning_index) != 0:
                            if index in warning_index:
                                session.add(
                                    MsetWarningLog(
                                        cr_time=base_data_list[index]["time"],
                                        # description is "<mp name>异常。" ("abnormal"),
                                        # naming the feature with the largest
                                        # raw-vs-estimate deviation.
                                        description=mps[np.argmax(
                                            feature_matrix[index] -
                                            Kest[index])].name + "异常。",
                                        asset_id=pump.asset_id,
                                        reporter_id=row.id,
                                    ))
                            session.commit()
                    session.query(Asset).filter(
                        Asset.id == pump.asset_id).update({
                            "statu":
                            determine_statu(feature_matrix=feature_matrix),
                            "health_indicator":
                            evaluate_res_insert_value[-1].health_indicator,
                            "md_time":
                            datetime.datetime.now(),
                        })
                    session.commit()
                    estimate_count += len(evaluate_res_insert_value)
                except Exception as e:
                    session.rollback()
                    print(e)

    session.close()

    return estimate_count
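
mset_evaluate is a synchronous batch job, presumably run from a scheduler or CLI entry point; a minimal invocation sketch (cycle_number=10 is an arbitrary example):

if __name__ == "__main__":
    # Evaluate every pump over the 10 most recent base-data cycles and
    # report how many AssetHI rows were written.
    inserted = mset_evaluate(cycle_number=10)
    print("inserted %d health-indicator rows" % inserted)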
Example #8
def expert_system_diagnosis():
    session = session_make(engine=meta_engine)
    assets = fetch_assets(session)
    mps = fetch_mps(session)
    processed_row = 0
    # Build the asset -> measuring-point tree
    for mp in mps:
        assets[mp.asset_id].setdefault("mps", {})
        assets[mp.asset_id]["mps"][mp.name] = mp._asdict()

    for (asset_id, asset) in assets.items():  # iterate over every asset
        if "mps" in asset:  # skip assets that have no measuring points
            for (mp_name, mp) in asset["mps"].items():  # iterate over this asset's measuring points

                signal_list = fetch_data(session, mp)
                # Fetch bearing information for this measuring point
                bearing_id, bearing_info = fetch_bearing_info(session,
                                                              asset=asset,
                                                              mp=mp)

                if (bearing_id is not None) and (len(bearing_info) != 0):
                    th = (session.query(Threshold.id,
                                        Threshold.diag_threshold).filter(
                                            Threshold.mp_pattern ==
                                            mp["position"].name).order_by(
                                                Threshold.id.desc()).one())

                    diag_res_insert_value = []
                    for index, signal in enumerate(signal_list):
                        diag_method = position_type_mapper[
                            mp["position"].name][2]
                        diag_res, marks, indicators = diag_method(
                            data=signal.vib,
                            fs=10000,
                            R=2800,
                            bearing_ratio=bearing_info._asdict(),
                            th=th.diag_threshold,
                        )
                        diag_res_sum = np.array(list(diag_res.values())).sum()
                        try:
                            if diag_res_sum != 0:
                                diag_res_insert_value.append(
                                    WarningLog(description=diag_res,
                                               marks=marks,
                                               threshold_id=th.id,
                                               severity=int(diag_res_sum - 1)
                                               if diag_res_sum < 3 else 2,
                                               asset_id=asset_id,
                                               mp_id=mp["id"],
                                               cr_time=signal.time,
                                               is_read=False,
                                               data_id=signal.id,
                                               **indicators))
                            if index == len(signal_list) - 1:
                                session.query(MeasurePoint).filter(
                                    MeasurePoint.id == mp["id"]).update({
                                        "id":
                                        mp["id"],
                                        "statu":
                                        int(diag_res_sum)
                                        if diag_res_sum < 4 else 3,
                                        "md_time":
                                        datetime.datetime.now(),
                                        "last_diag_id":
                                        signal.id,
                                    })
                                session.add_all(diag_res_insert_value)
                            session.commit()
                            processed_row += 1
                        except Exception as e:
                            session.rollback()
                            print(e)
    session.close()
    return processed_row
Example #9
import datetime
import random

import MySQLdb
import numpy as np
from sqlalchemy.ext.declarative import declarative_base
from db.conn_engine import META_URL
from db import session_make, meta_engine
from db_model import VibData, VibFeature, MeasurePoint, ElecFeature, ElecData
from utils.elec_feature_tool import feature_calculator
from utils.simulators import unbalance, misalignment, a_loose, b_loose, rubbing
from sqlalchemy import create_engine

session = session_make(meta_engine)

base = declarative_base()
engine = create_engine(META_URL, encoding="utf-8", pool_pre_ping=True)
x = session.query(MeasurePoint).filter(
    MeasurePoint.station_id == 7, MeasurePoint.type == 0
)

for row in x:
    model = VibData.model(
        station_id=row.station_id, inner_id=row.inner_station_id, base=base
    )  # register with the shared metadata for every pump unit
    fea_model = VibFeature.model(
        station_id=row.station_id, inner_id=row.inner_station_id, base=base
    )

base.metadata.create_all(engine)
Example #10
async def get_avg_hi_pre(conn: Database,
                         session: Session = session_make(engine=None)):
    query = session.query(Asset.id, Asset.name).filter(Asset.asset_type == 0)
    res = await conn.fetch_all(query2sql(query))
    return res