Example 1
    def __init__(self):
        self.pymortar_client = pymortar.Client()

        price_path = PRICE_DATA_PATH / "price-mapping.csv"
        if not os.path.isfile(str(price_path)):
            logging.critical("Error: could not find file at: %s",
                             str(price_path))
            sys.exit()

        self.price_mapping = pd.read_csv(str(price_path))

        # Build the list of (tariff, utility) pairs and load each tariff's
        # price history, normalizing its index to UTC timestamps.
        tariffs_utilities = []
        self.all_tariffs_utilities_dfs = {}
        for utility, tariffs in ALL_TARIFFS_UTILITIES.items():
            for tariff in tariffs:
                tariffs_utilities.append(
                    price_pb2.TariffUtilityReply(tariff=tariff,
                                                 utility=utility))
                df = pd.read_csv(
                    PRICE_DATA_PATH /
                    ("prices_01012017_040172019/" + tariff + ".csv"),
                    index_col=[0],
                    parse_dates=False)
                df = df.fillna(0)
                df.index = pd.to_datetime(df.index)
                df = df.tz_localize("US/Pacific",
                                    nonexistent='shift_forward',
                                    ambiguous=False).tz_convert(pytz.utc)
                self.all_tariffs_utilities_dfs[tariff] = df
        self.all_tariffs_utilities = price_pb2.AllTariffUtilityReply(
            tariffs_utilities=tariffs_utilities)
Example 2
def evaluate(site, date, model_name='best'):
    cli = pymortar.Client()
    date = pd.to_datetime(date).date()
    best_model_path = './models/{}/{}'.format(site, model_name)
    with open(best_model_path, 'rb') as model_file:
        best_model = pickle.load(model_file)
    actual, prediction, event_weather, baseline_weather = best_model.predict(
        site, date)
    weather_mean = event_weather[((event_weather.index.hour >= 14) &
                                  (event_weather.index.hour <= 18))].mean()
    daily_data = get_daily_data(site, actual, prediction)
    return {
        'site': site,
        'date': date,
        'energy cost': {
            'actual': daily_data['actual_cost'],
            'baseline': daily_data['baseline_cost']
        },
        'OAT_mean': {
            'event': weather_mean['weather'],
            'baseline': baseline_weather
        },
        'baseline-type': best_model.name,
        'baseline-rmse': best_model.rmse,
        'actual': actual.values,
        'baseline': prediction.values
    }
Example 3
    def __init__(self):
        """ Constructor.

        Note
        ----
        For pymortar, set the environment variables $MORTAR_API_USERNAME & $MORTAR_API_PASSWORD.

        For Mac,
        1. vi ~/.bash_profile
        2. Add at the end of the file,
            1. export MORTAR_API_USERNAME=username
            2. export MORTAR_API_PASSWORD=password
        3. source ~/.bash_profile

        """

        # Pymortar client
        self.pymortar_client = pymortar.Client()

        # List of zones in building
        building_names_stub = xbos_services_getter.get_building_zone_names_stub()
        self.supported_buildings = xbos_services_getter.get_buildings(
            building_names_stub)

        self.pymortar_objects = {
            'MEAN': pymortar.MEAN,
            'MAX': pymortar.MAX,
            'MIN': pymortar.MIN,
            'COUNT': pymortar.COUNT,
            'SUM': pymortar.SUM,
            'RAW': pymortar.RAW
        }
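The note above relies on pymortar reading its credentials from the environment. A minimal sketch, assuming pymortar.Client() picks up MORTAR_API_USERNAME and MORTAR_API_PASSWORD as the docstring describes, that fails fast when they are missing:

import os
import sys

import pymortar

# Check for the credential variables named in the docstring before
# constructing the client.
missing = [name for name in ("MORTAR_API_USERNAME", "MORTAR_API_PASSWORD")
           if name not in os.environ]
if missing:
    sys.exit("Missing environment variables: " + ", ".join(missing))

client = pymortar.Client()  # reads the credentials from the environment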
Example 4
 def setup(self, cfg):
     self.pymortar_client = pymortar.Client({
         'username': cfg['mortar_api_username'],
         'password': cfg['mortar_api_password']
     })
     self.bacnet = BAC0.connect()
     # A dictionary mapping BACnet point names to their Brick classes grouped by Equipment class
     self.point_class_dict = get_point_class_dict(self.pymortar_client,
                                                  cfg['building'])
     # A list of all the BACnet devices in the network
     self.devices = [
         BAC0.device(dev[2], dev[3], self.bacnet)
         for dev in self.bacnet.devices
     ]
Example 5
def zone_comfort_evaluation(sensor, index):
    """
    Evaluate long-term thermal comfort for all buildings
    
    Parameters
    ----------
    sensor : str
             sensor name type to evaluate e.g. Zone_Air_Temperature
    index  : str
             calculation method name e.g. hourly_outlier
        

    Returns
    -------
    p : float
        percentage of the time
    """
    
    # build query and get which sites have the point to do this analysis
    qualify_resp, query = _query_and_qualify(sensor)
    
    # connect client to Mortar frontend server
    client = pymortar.Client("https://beta-api.mortardata.org")
    
    # evaluate all available sites
    for i in qualify_resp.sites:
        # get the metadata of all query sensors at a specific site
        metadata = client.sparql(query, sites=[i])
        
        # get a dataframe for each sensor at the site
        for idxi, row in metadata.iterrows():
            
            # get data for sensor through all time
            query_resp = client.data_uris([row['sensor']])
            df = query_resp.data
            
            # parse the hour and weekday info and add it as a column
            df['hr'] = pd.to_datetime(df['time']).dt.hour
            df['wk'] = pd.to_datetime(df['time']).dt.dayofweek
            
            # get rows that fall within the specified office hours on weekdays
            df_occ = df[(df['hr'] >= 9) & (df['hr'] < 17) &
                        (df['wk'] >= 0) & (df['wk'] <= 4)]
            
            
    print('##### App has finished #####')
Example 6
def _query_and_qualify(sensor):
    """
    Build query to return zone air temperature measurements and qualify
    which site can run this application
    Parameters
    ----------
    sensor : sensor name type to evaluate e.g. Zone_Air_Temperature
    Returns
    -------
    qualify_resp : Mortar QualifyResponse object
    query : dictionary with query and sensor
    """
    # connect to client
    client = pymortar.Client(MORTAR_URL)

    # initialize container for query information
    query = dict()

    # define queries for input sensors and setpoints
    sensor_query = """SELECT ?sensor ?equip WHERE {{
        ?sensor    rdf:type/rdfs:subClassOf*     brick:{0}_Sensor .
        ?sensor    brick:isPointOf ?equip .
    }}""".format(sensor)

    setpoint_query = """SELECT ?sp ?equip WHERE {{
        ?sp    rdf:type/rdfs:subClassOf*     brick:{0}_Setpoint .
        ?sp    brick:isPointOf ?equip .
    }}""".format(sensor)

    # find sites with input sensors and setpoints
    qualify_resp = client.qualify({"measurement": sensor_query, "setpoint": setpoint_query})
    # if qualify_resp.error != "":
    #     print("ERROR: ", qualify_resp.error)
    #     sys.exit(1)

    # save queries and sensor information
    query['query'] = dict()
    query['query']['sensor'] = sensor_query
    query['query']['setpoint'] = setpoint_query
    query['sensor'] = sensor

    print("running on {0} sites".format(len(qualify_resp.sites)))
    print(qualify_resp.sites)

    return qualify_resp, query
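A usage sketch for the helper above (the sensor name is the docstring's own example; MORTAR_URL must already be defined, as in the function body):

# Qualify sites that expose both sensors and setpoints for the given type.
qualify_resp, query = _query_and_qualify("Zone_Air_Temperature")
print(query['sensor'], len(qualify_resp.sites))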
Example 7
File: server.py Project: kuzha/XBOS
 def __init__(self):
     try:
         self.pymortar_client = pymortar.Client()
         outdoor_temperature_historical_path = OUTDOOR_TEMPERATURE_HISTORICAL_DATA_PATH / "weather-mapping.csv"
         if not os.path.isfile(str(outdoor_temperature_historical_path)):
             logging.critical("Error: could not find file at: %s",
                              str(outdoor_temperature_historical_path))
             sys.exit()
         self.weather_mapping = pd.read_csv(
             str(outdoor_temperature_historical_path))
         self.fb_prophet_models = train_models(self.weather_mapping,
                                               self.pymortar_client)
         print("ready to serve")
     except Exception:
         tb = traceback.format_exc()
         logging.critical(
             "Error: failed to initialize microservice or models: %s", tb)
         sys.exit()
Example 8
def main():
    
    pymortar_client = pymortar.Client()
    buildings = ['orinda-public-library']
    proto_json = gen_proto_json() if os.path.exists('hvac.proto') and os.stat("hvac.proto").st_size != 0 else {}
    f = open("hvac.proto","w+")
    clustered_equipment = {"Damper": {}, "Fan": {}, "Thermostat": {}}
    
    f.write("syntax = \"proto3\";\r\n\n")
    f.write('package xbospb;\n\nimport "brick.proto";\nimport "nullabletypes.proto";\n\n')

    for building in buildings:
        # Go through all buildings, get equipment, and their respective points
        equipment = get_equipment_list(building, pymortar_client)

        # Add proto definitions for each piece of equipment
        for eq in equipment:
            points = dict.fromkeys(get_points_for_equipment(eq, building, pymortar_client), 0)

            # Indicates whether the equipment being processed belongs to a
            # bigger cluster or becomes its own message in hvac.proto
            clustered = False

            for clustered_eq in clustered_equipment:
                # Union all the clustered equipment
                if clustered_eq.lower() in eq.lower():
                    points = set(clustered_equipment[clustered_eq].keys()).union(points.keys())
                    clustered_equipment[clustered_eq] = dict.fromkeys(points, 0)
                    clustered = True
                    break

            if not clustered:
                # Write unclustered equipment to hvac.proto
                match_proto_numbering_and_write(f, eq, points, proto_json)

    # Write all clustered equipment to hvac.proto
    for eq in clustered_equipment:
        points = clustered_equipment[eq]
        if points:
            match_proto_numbering_and_write(f, eq, points, proto_json)
    
    f.close()
Example 9
def evaluate(site, date, model_name='best'):
    cli = pymortar.Client()
    date = pd.to_datetime(date).date()
    best_model_path = './models/{}/{}.txt'.format(site, model_name)
    with open(best_model_path, 'rb') as model_file:
        best_model = pickle.load(model_file)
    actual, prediction = best_model.predict(site, date)
    daily_data = get_daily_data(site, actual, prediction)
    return {
        'site': site,
        'date': date,
        'cost': {
            'actual': daily_data['actual_cost'],
            'baseline': daily_data['baseline_cost']
        },
        'degree-days': {
            'cooling': None,
            'heating': None
        },
        'baseline-type': best_model.name,
        'baseline-rmse': best_model.rmse,
        'actual': actual.values,
        'baseline': prediction.values
    }
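A hedged usage sketch for evaluate() (the site name and date are hypothetical placeholders; a pickled model must already exist at ./models/<site>/best.txt):

# Hypothetical site and event date.
result = evaluate('some-site', '2019-01-15')
print(result['site'], result['date'])
print('actual cost:', result['cost']['actual'],
      'baseline cost:', result['cost']['baseline'])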
Example 10
def read_config():
    """ Reads config.json file that contains parameters for baselines and fetches data from Mortar. 

	Returns
	-------
	pd.DataFrame(), pd.DataFrame(), default(list), default(list)
		meter data, oat data, map of uuid to meter data, map of uuid to oat data
	
	"""

    # Instantiate Client
    client = pymortar.Client({})

    # Query for meter data
    query_meter = "SELECT ?meter WHERE { ?meter rdf:type/rdfs:subClassOf* brick:Green_Button_Meter };"

    # Query for outdoor air temperature data
    query_oat = """ SELECT ?t WHERE { ?t rdf:type/rdfs:subClassOf* brick:Weather_Temperature_Sensor };"""

    # Get list of sites for meter data and OAT data
    resp_meter = client.qualify([query_meter])
    resp_oat = client.qualify([query_oat])

    if resp_meter.error or resp_oat.error:
        print("ERORR: ", resp_meter.error if True else resp_oat.error)
        os._exit(0)
    else:
        # Get list of sites that are common for meter data and OAT data
        common_sites = list(
            set(resp_meter.sites).intersection(set(resp_oat.sites)))

        # If config['sites'] is empty, default to all sites
        if not config['sites']:
            config['sites'] = common_sites
        else:
            for site in config['sites']:
                if site not in common_sites:
                    print('Incorrect site name.')
                    os._exit(0)
            print("Running on {0} sites".format(len(config['sites'])))

    # Define the view of meters (metadata)
    meter = pymortar.View(
        name="view_meter",
        sites=config['sites'],
        definition=query_meter,
    )

    # Define the view of OAT (metadata)
    oat = pymortar.View(name="view_oat",
                        sites=config['sites'],
                        definition=query_oat)

    # Define the meter timeseries stream
    data_view_meter = pymortar.DataFrame(
        name="data_meter",  # dataframe column name
        aggregation=pymortar.MEAN,
        window="15m",
        timeseries=[
            pymortar.Timeseries(view="view_meter", dataVars=["?meter"])
        ])

    # Define the OAT timeseries stream
    data_view_oat = pymortar.DataFrame(
        name="data_oat",
        aggregation=pymortar.MEAN,
        window="15m",
        timeseries=[pymortar.Timeseries(view="view_oat", dataVars=["?t"])])

    # Define timeframe
    time_params = pymortar.TimeParams(start=config['time']['start'],
                                      end=config['time']['end'])

    # Form the full request object
    request = pymortar.FetchRequest(
        sites=config['sites'],
        views=[meter, oat],
        dataFrames=[data_view_meter, data_view_oat],
        time=time_params)

    # Fetch data from request
    data = client.fetch(request)

    # Map uuid column names to site names
    map_uuid_meter, map_uuid_oat = map_uuid_sitename(data)

    # Save data to csv file
    if config['save_data']:
        data['data_meter'].to_csv('meter_data.csv')
        data['data_oat'].to_csv('oat_data.csv')

    return data['data_meter'], data['data_oat'], map_uuid_meter, map_uuid_oat
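A usage sketch matching the documented return signature (read_config() assumes a global config dict loaded from config.json with 'sites', 'time', and 'save_data' keys, as used above):

# Unpack the four documented return values.
meter_df, oat_df, map_uuid_meter, map_uuid_oat = read_config()
print(meter_df.shape, oat_df.shape)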
Example 11
import pymortar
import time
import pytz
import datetime
from rfc3339 import rfc3339
import xbos_services_getter as xbos

pymortar_client = pymortar.Client()

def get_outside_temp_data(building):

    interval = 3600

    outside_temperature_query = """SELECT ?temp WHERE {
        ?temp rdf:type brick:Weather_Temperature_Sensor .
    };"""

    weather_stations_view = pymortar.View(
        name="weather_stations_view",
        sites=[building],
        definition=outside_temperature_query,
    )

    weather_stations_stream = pymortar.DataFrame(
        name="weather_stations",
        aggregation=pymortar.MEAN,
        window=str(int(interval)) + 's',
        timeseries=[
            pymortar.Timeseries(
                view="weather_stations_view",
                dataVars=["?temp"],
Example 12
import pandas as pd

from pandas.tseries.holiday import USFederalHolidayCalendar as calendar
from pandas.tseries.offsets import CustomBusinessDay
import matplotlib.pyplot as plt

import numpy as np
import datetime, pytz

from .get_data import get_weather, get_df
from .utils import get_window_of_day

import pymortar

cli = pymortar.Client()

def _remove_PDP_days(data, PDP_list):

    try:
        for i in PDP_list:
            day = datetime.datetime.strptime(str(i), "%Y-%m-%d").date()
            data = data[~(data.index.date == day)]
        return data

    except Exception:
        print('error in _remove_PDP_days')
        return data


def _remove_WE_holidays_NaN(data, start, end):
Example 13
File: server.py Project: kuzha/XBOS
 def __init__(self):
     self.pymortar_client = pymortar.Client()
Example 14
def read_config():
    """ Reads config.json file to obtain parameters and fetch data from Mortar.

	Returns
	-------
	pd.DataFrame(), pd.DataFrame(), default(list), default(list)
		meter data, occupancy data, map of uuid to meter data, map of uuid to occupancy data
	
	"""

    # Instantiate Client
    client = pymortar.Client({})

    # Query for meter data
    query_meter = "SELECT ?meter WHERE { ?meter rdf:type/rdfs:subClassOf* brick:Green_Button_Meter };"

    # Query for occupancy data
    query_occupancy = "SELECT ?point WHERE { ?point rdf:type/rdfs:subClassOf* brick:Occupancy_Sensor };"

    # Get list of sites for meter data and occupancy data
    resp_meter = client.qualify([query_meter])
    resp_occupancy = client.qualify([query_occupancy])

    if resp_meter.error or resp_occupancy.error:
        print("ERORR: ", resp_meter.error if True else resp_occupancy.error)
        os._exit(0)
    else:

        # Get list of sites that are common for meter data and occupancy data
        common_sites = list(
            set(resp_meter.sites).intersection(set(resp_occupancy.sites)))

        # If config['sites'] is empty, default to all sites
        if not config['sites']:
            config['sites'] = common_sites
        else:
            for site in config['sites']:
                if site not in common_sites:
                    print('Incorrect site name.')
                    os._exit(0)
            print("Running on {0} sites".format(len(config['sites'])))

    # Define the view of meters (metadata)
    meter = pymortar.View(
        name="view_meter",
        sites=config['sites'],
        definition=query_meter,
    )

    # Define the view of occupancy (metadata)
    occupancy = pymortar.View(name="view_occupancy",
                              sites=config['sites'],
                              definition=query_occupancy)

    # Define the meter timeseries stream
    data_view_meter = pymortar.DataFrame(
        name="data_meter",  # dataframe column name
        aggregation=pymortar.MEAN,
        window="15m",
        timeseries=[
            pymortar.Timeseries(view="view_meter", dataVars=["?meter"])
        ])

    # Define the occupancy timeseries stream
    data_view_occupancy = pymortar.DataFrame(
        name="data_occupancy",  # dataframe column name
        aggregation=pymortar.RAW,
        window="",
        timeseries=[
            pymortar.Timeseries(view="view_occupancy", dataVars=["?point"])
        ])

    # Define timeframe
    time_params = pymortar.TimeParams(start=config['time']['start'],
                                      end=config['time']['end'])

    # Form the full request object
    request = pymortar.FetchRequest(
        sites=config['sites'],
        views=[meter, occupancy],
        dataFrames=[data_view_meter, data_view_occupancy],
        time=time_params)

    # Fetch data from request
    response = client.fetch(request)

    # Save data to csv file
    if config['save_data']:
        response['data_meter'].to_csv('meter_data.csv')
        response['data_occupancy'].to_csv('occupancy_data.csv')

    # Create results folder if it doesn't exist
    if not os.path.exists('./' + config['results_folder']):
        os.mkdir('./' + config['results_folder'])

    return response
Example 15
import pymortar
import sys
import pandas as pd

# use default values (environment variables):
# MORTAR_API_ADDRESS: mortardata.org:9001
# MORTAR_API_USERNAME: required
# MORTAR_API_PASSWORD: required
client = pymortar.Client({})

meter_query = "SELECT ?meter WHERE { ?meter rdf:type/rdfs:subClassOf* brick:Building_Electric_Meter };"

# run qualify stage to get list of sites with electric meters
resp = client.qualify([meter_query])
if resp.error != "":
    print("ERROR: ", resp.error)
    sys.exit(1)

print("running on {0} sites".format(len(resp.sites)))

# define the view of meters (metadata)
meters = pymortar.View(
    sites=resp.sites,
    name="meters",
    definition=meter_query,
)

# define the meter timeseries streams we want
meter_data = pymortar.DataFrame(
    name="meters",
    aggregation=pymortar.MEAN,
Example 16
import json
import glob
import os
import pytz
import toml
import pymortar
from bson import json_util
from collections import defaultdict
from functools import update_wrapper
from datetime import datetime, timedelta
from flask import Flask
from flask_pymongo import PyMongo
from dashutil import get_start, generate_months, prevmonday, get_today

app = Flask(__name__, static_url_path='/static')

config = toml.load('config.toml')
TZ = pytz.timezone('US/Pacific')
client = pymortar.Client({
    'mortar_address': config['Mortar']['url'],
    'username': config['Mortar']['username'],
    'password': config['Mortar']['password'],
})
sites = [config['Dashboard']['sitename']]

# MongoDB configurations
app.config['MONGO_DBNAME'] = 'modes'
app.config["MONGO_URI"] = "mongodb://localhost:27017/modes"
mongo = PyMongo(app)

# Push default modes to mongodb once script starts
INITIALIZED = False


def crossdomain(origin=None,
                methods=None,
Example 17
def _analyze(query, fetch_resp, th_type='abs', th_diff=0.25, th_time=15):
    """
    Parameters
    ----------
    query : dictionary with query and sensor
    fetch_resp : Mortar FetchResponse object
    th_type : Type of comparison performed when evaluating sensor measurement against the setpoint value.
              Available options are (any input value within list is valid):
                ['under', 'u', '-', 'neg', '<'] = return sensors that are under setpoint by th_diff for th_time
                ['over', 'o', '+', 'pos', '>']  = return sensors that are over setpoint by th_diff for th_time
                ['outbound', 'outbounds', 'ob', '><'] = return sensors that are either under minimum setpoint value by th_diff
                                                    or over maximum setpoint value by th_diff for th_time
                ['bounded', 'inbounds','inbound', 'ib', '<>'] = return sensors that are within minimum setpoint value + th_diff
                                                                and maximum setpoint value - th_diff
                ['abs', ''] (default type) = return sensors that deviate from the setpoint value by more than +/- th_diff.
    th_diff : threshold allowance for determining if a sensor measurement is not adhering to its setpoint,
              in the same units as the selected sensor e.g. if 'over' is selected for th_type and 2 for
              th_diff then 'bad sensors' are reported whenever the sensor measurement exceeds setpoint + 2.
    th_time : Amount of time in minutes that a sensor measurement needs to meet the selected criteria in order to qualify as 'bad'.
              Must be greater than or equal to, and a multiple of, the data aggregation window.
    window : aggregation window in minutes that the data from sensors and setpoints are in

    Returns
    -------
    None
    The app produces a CSV file called `<sensor>_measure_vs_setpoint_<type of analysis>.csv` when run
        where '<sensor>' states the sensor type and '<analysis>' states the type of analysis performed.
    """
    # connect to client
    client = pymortar.Client(MORTAR_URL)

    sensor  = query['sensor']
    comb_metadata = _clean_metadata(fetch_resp)

    start = fetch_resp['time']['start']
    end = fetch_resp['time']['end']
    window = fetch_resp['time']['window']
    records = []

    for idxi, row in comb_metadata.iterrows():
        # get data for sensor and setpoint
        sensor_df = client.data_uris([row['sensor']], start=start, end=end, agg='mean', window="{}m".format(window))
        setpoint_df = client.data_uris([row['sp']], start=start, end=end, agg='mean', window="{}m".format(window))

        if not any([sensor_df.data.empty, setpoint_df.data.empty]):
            df = _clean_df(sensor_df.data, setpoint_df.data)

            if False:  # debug branch (disabled): dump each sensor/setpoint pair to CSV
                zone_name = str(row['sensor']).split('#')[1]
                csv_name = f"./zone_dat/{row['site']}-{zone_name}-{idxi}.csv"

                with open(csv_name, 'w') as fout:
                    fout.write(f"sensor: {row['sensor']}\n")
                    fout.write(f"setpoint {row['sp']}\n\n")

                    df.drop(columns=['sensor_id', 'setpoint_id']).to_csv(fout, index=False)
        else:
            continue

        # # for each equipment, pull the UUID for the sensor and setpoint
        # q = """
        # SELECT sensor_uuid, sp_uuid, {1}_sps.equip, {1}_sps.site
        # FROM {1}_sensors
        # LEFT JOIN {1}_sps
        # ON {1}_sps.equip = {1}_sensors.equip
        # WHERE {1}_sensors.equip = "{0}";
        # """.format(equip, sensor)

        # res = fetch_resp.query(q)
        # if len(res) == 0:
        #     continue

        # sensor_col = res[0][0]
        # setpoint_col = res[0][1]

        # if sensor_col is None or setpoint_col is None:
        #     continue

        # if sensor_col not in sensor_df:
        #     print('no sensor', sensor_col)
        #     continue

        # if setpoint_col not in setpoint_df:
        #     print('no sp', setpoint_col)
        #     continue

        # # create the dataframe for this pair of sensor and setpoint
        # df = pd.DataFrame([sensor_df[sensor_col], setpoint_df[setpoint_col]]).T
        # df.columns = ["{}_sensors".format(sensor), "{}_sps".format(sensor)]

        if th_type in ['under', 'u', '-', 'neg', '<']: # if measurement is under sp by th_diff
            bad = (df["sensor_val"]) < (df["setpoint_val"] - th_diff)
            str_th_type = 'Undershooting'

        elif th_type in ['over', 'o', '+', 'pos', '>']: # if measurement is over sp by th_diff
            bad = (df["sensor_val"]) > (df["setpoint_val"] + th_diff)
            str_th_type = 'Overshooting'

        elif th_type in ['outbound', 'outbounds', 'ob', '><']: # if measurement is either below min sp or above max sp by th_diff
            max_sp = df["setpoint_val"].max()
            min_sp = df["setpoint_val"].min()

            bad_max = (df["sensor_val"]) > (max_sp + th_diff)
            bad_min = (df["sensor_val"]) < (min_sp - th_diff)

            bad = pd.DataFrame([bad_min, bad_max]).any()  # either bound violated
            str_th_type = 'Exceedance_of_min-max'

        elif th_type in ['bounded', 'inbounds','inbound', 'ib', '<>']: # if measurement is either within min and max sp by th_diff
            max_sp = df["setpoint_val"].max()
            min_sp = df["setpoint_val"].min()

            bad_max = (df["sensor_val"]) < (max_sp - th_diff)
            bad_min = (df["sensor_val"]) > (min_sp + th_diff)

            bad = pd.DataFrame([bad_min, bad_max]).all()
            str_th_type = 'Within_min-max'

        else:
            bad = abs(df["sensor_val"] - df["setpoint_val"]) > th_diff
            str_th_type = 'Not_within_setpoint'

        if len(df[bad]) == 0: continue
        df['same'] = bad.astype(int).diff(1).cumsum()
        # this increments every time we get a new run of sensor being below the setpoint
        # use this to group up those ranges
        df['same2'] = bad.astype(int).diff().ne(0).cumsum()

        lal = df[bad].groupby('same2')['same']
        # grouped by ranges that meet the predicate (df.sensor + th_diff < df.setpoint)
        for g in lal.groups:
            idx = list(lal.groups[g])
            if len(idx) < 2: continue
            data = df[idx[0]:idx[-1]]
            if len(data) >= (60/th_time):  # keep only runs that last long enough
                fmt = {
                    'site': row['site'],
                    'equipment': row['equip'],
                    'hours': len(data) / (60/window),
                    'start': idx[0],
                    'end': idx[-1],
                    'sensor_val': (data["sensor_val"]).mean(),
                    'setpoint_val': (data["setpoint_val"]).mean(),
                    'diff': (data["setpoint_val"] - data["sensor_val"]).mean(),
                }
                records.append(fmt)
                print("{str_th_type} {sensor} for {hours} hours From {start} to {end}, avg diff {diff:.2f}".format(**fmt,
                                                                                                                   sensor=sensor,
                                                                                                                   str_th_type=str_th_type))

    r = pd.DataFrame(records)
    print('##### Saving Results #####')
    r.to_csv('{}_measure_vs_setpoint_{}.csv'.format(sensor, str_th_type), index=False)
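A hedged invocation sketch, reusing the docstring's own example values ('over' with th_diff=2 flags sensors reading more than 2 units above their setpoint for at least th_time minutes; query and fetch_resp come from the companion helpers shown in the other examples):

# Report sensors overshooting their setpoint by more than 2 units.
_analyze(query, fetch_resp, th_type='over', th_diff=2, th_time=15)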
Example 18
def _fetch(qualify_resp, query, eval_start_time, eval_end_time, window=15):
    """
    Build the fetch query and define the thermal comfort evaluation time.

    Parameters
    ----------
    qualify_resp : Mortar QualifyResponse object
    query : dictionary with query and sensor
    eval_start_time : start date and time in format (yyyy-mm-ddTHH:MM:SSZ) for the thermal
                      comfort evaluation period
    eval_end_time : end date and time in format (yyyy-mm-ddTHH:MM:SSZ) for the thermal
                    comfort evaluation period
    window : aggregation window in minutes to average the measurement data

    Returns
    -------
    fetch_resp : Mortar FetchResponse object
    """
    # connect to client
    client = pymortar.Client(MORTAR_URL)

    sensor         = query['sensor']
    sensor_query   = query['query']['sensor']
    setpoint_query = query['query']['setpoint']

    # build the fetch request
    avail_sites = qualify_resp.sites

    # request = pymortar.FetchRequest(
    #     sites=req_sites,
    #     views=[
    #         pymortar.View(
    #             name="{}_sensors".format(sensor),
    #             definition=sensor_query,
    #         ),
    #         pymortar.View(
    #             name="{}_sps".format(sensor),
    #             definition=setpoint_query,
    #         )
    #     ],
    #     dataFrames=[
    #         pymortar.DataFrame(
    #             name="sensors",
    #             aggregation=pymortar.MEAN,
    #             window="{}m".format(window),
    #             timeseries=[
    #                 pymortar.Timeseries(
    #                     view="{}_sensors".format(sensor),
    #                     dataVars=["?sensor"],
    #                 )
    #             ]
    #         ),
    #         pymortar.DataFrame(
    #             name="setpoints",
    #             aggregation=pymortar.MEAN,
    #             window="{}m".format(window),
    #             timeseries=[
    #                 pymortar.Timeseries(
    #                     view="{}_sps".format(sensor),
    #                     dataVars=["?sp"],
    #                 )
    #             ]
    #         )
    #     ],
    #     time=pymortar.TimeParams(
    #         start=eval_start_time,
    #         end=eval_end_time,
    #     )
    # )

    # call the fetch api
    # fetch_resp = client.fetch(request)
    # print(fetch_resp)

    # fetch point metadata
    sensor_view = client.sparql(sensor_query, sites=avail_sites).reset_index(drop=True)
    setpoint_view = client.sparql(setpoint_query, sites=avail_sites).reset_index(drop=True)

    fetch_resp = dict()
    fetch_resp['sensor'] = sensor_view
    fetch_resp['setpoint'] = setpoint_view

    # save time parameters
    fetch_resp['time'] = dict()
    fetch_resp['time']['start'] = eval_start_time
    fetch_resp['time']['end'] = eval_end_time
    fetch_resp['time']['window'] = window

    return fetch_resp
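A usage sketch chaining the two helpers (the evaluation window is a hypothetical placeholder in the yyyy-mm-ddTHH:MM:SSZ format the docstring requires):

# Build the queries, then fetch metadata for a hypothetical one-month window.
qualify_resp, query = _query_and_qualify("Zone_Air_Temperature")
fetch_resp = _fetch(qualify_resp, query,
                    "2019-01-01T00:00:00Z",
                    "2019-02-01T00:00:00Z",
                    window=15)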
Example 19
    def __init__(self):
        """ Constructor. """

        self.client = pymortar.Client({})
Example 20
import pymortar

c = pymortar.Client('http://localhost:5001')

resp = c.qualify([
    "SELECT ?s ?equip WHERE { ?s a brick:Temperature_Sensor . ?s brick:isPointOf ?equip }",
    "SELECT ?vav WHERE { ?vav a brick:VAV . ?vav brick:feeds ?zone . ?zone a brick:HVAC_Zone }",
])

air_temp_sensor_query = """SELECT ?sensor ?equip WHERE {
    ?sensor    a     brick:Air_Temperature_Sensor .
    ?sensor    brick:isPointOf ?equip
}"""
air_temp_setpoint_query = """SELECT ?sp ?equip WHERE {
    ?sp    a     brick:Air_Temperature_Setpoint .
    ?sp    brick:isPointOf ?equip
}"""

qualify_resp = c.qualify([air_temp_sensor_query, air_temp_setpoint_query])
print(qualify_resp)
print("running on {0} sites".format(len(qualify_resp.sites)))
request = pymortar.FetchRequest(sites=qualify_resp.sites,
                                views=[
                                    pymortar.View(
                                        name="airtemp_sensors",
                                        definition=air_temp_sensor_query,
                                    ),
                                    pymortar.View(
                                        name="airtemp_sps",
                                        definition=air_temp_setpoint_query,