Example #1
def daytime_work_consumption(session, epmdataobject, starttime, endtime):
    '''Calculates the energy consumption during business hours'''

    try:
        queryperiod = epm.QueryPeriod(starttime, endtime)
        processInterval = datetime.timedelta(seconds=300)
        aggregationdetails = epm.AggregateDetails(
            processInterval, epm.AggregateType.Interpolative)
        data = epmdataobject.historyReadAggregate(aggregationdetails,
                                                  queryperiod)

    except Exception:
        raise Exception('Error in read aggregation')

    # EPM returns an ndarray; convert it to a pandas DataFrame
    df = pd.DataFrame({
        'Value': data['Value'].tolist(),
        'Timestamp': data['Timestamp'].tolist()
    })

    # set Timestamp as index in dataframe
    df.set_index('Timestamp', inplace=True)

    # filter work hours
    df = df.between_time('08:00', '18:00')

    # filter business days (Monday=0 .. Friday=4)
    df = df[df.index.dayofweek < 5]

    total = df['Value'].sum()

    print(total)

    return epr.ScopeResult(True)
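The business-hours and business-days filtering above can be checked without an EPM connection. A minimal, self-contained sketch on synthetic data (the date range and values below are made up for illustration and are not part of the original example):

import numpy as np
import pandas as pd

# one week of made-up 5-minute samples
idx = pd.date_range('2021-03-01', '2021-03-08', freq='5min')
df = pd.DataFrame({'Value': np.random.rand(len(idx))}, index=idx)

# keep only 08:00-18:00 ...
df = df.between_time('08:00', '18:00')

# ... on business days (Monday=0 .. Friday=4)
df = df[df.index.dayofweek < 5]

print(df['Value'].sum())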
Example #2
def predict_variable(epmdataobject, starttime, endtime, interval_seconds):

    try:
        queryperiod = epm.QueryPeriod(starttime, endtime)
        processInterval = datetime.timedelta(seconds=interval_seconds)
        aggregationdetails = epm.AggregateDetails(
            processInterval, epm.AggregateType.Interpolative)
        data = epmdataobject.historyReadAggregate(aggregationdetails,
                                                  queryperiod)

    except Exception:
        raise Exception('Error in read aggregation')

    # preprocessing and cleaning the data:
    # build a sample-index array (time axis) and the dataobject values

    y = np.asarray(data['Value'], dtype=float)
    x = np.arange(len(y))

    # keep only the samples whose value is not NaN
    x = x[~np.isnan(y)]
    y = y[~np.isnan(y)]

    import matplotlib.pyplot as plt
    plt.scatter(x, y)
    plt.title("Web traffic over the last month")
    plt.xlabel("Time")
    plt.ylabel("Hits/hour")
    plt.xticks([w * 7 * 24 for w in range(10)],
               ['week %i' % w for w in range(10)])
    plt.autoscale(tight=True)
    plt.grid()
    plt.show()
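The function is named predict_variable but, as written, it stops at the plot. A possible next step, shown here only as a sketch using plain numpy (it is not part of the original example), would be to fit a simple trend to the cleaned samples and extrapolate it:

    # fit a first-order polynomial (straight line) to the cleaned data
    # and evaluate it at the next sample index -- illustrative sketch only
    coeffs = np.polyfit(x, y, deg=1)
    trend = np.poly1d(coeffs)
    next_value = trend(x[-1] + 1)
    print('Extrapolated next value: {}'.format(next_value))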
Example #3
def weekly_report(session, dataobject_temperature, dataobject_windspeed,
                  write_dataobject_temperature, write_dataobject_windspeed):
    '''Builds a weekly report from temperature and wind speed data'''
    end_date = session.timeEvent
    ini_date = end_date - datetime.timedelta(weeks=1)
    query_period = epm.QueryPeriod(ini_date, end_date)
    process_interval = datetime.timedelta(minutes=10)

    aggregate_details = epm.AggregateDetails(process_interval,
                                             epm.AggregateType.Interpolative)

    temperature = dataobject_temperature.historyReadAggregate(
        aggregate_details, query_period)
    windspeed = dataobject_windspeed.historyReadAggregate(
        aggregate_details, query_period)

    pd_temperature = pd.DataFrame({
        'Value': temperature['Value'].tolist(),
        'Timestamp': temperature['Timestamp'].tolist()
    })
    pd_windspeed = pd.DataFrame({
        'Value': windspeed['Value'].tolist(),
        'Timestamp': windspeed['Timestamp'].tolist()
    })
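The example stops after building the two DataFrames. A possible continuation, sketched here under the assumption that the write dataobjects accept the same write(value, timestamp, quality) call used in Example #7, would compute the weekly statistics and write them back:

    # weekly averages over the queried period -- sketch, not in the original
    avg_temperature = float(pd_temperature['Value'].mean())
    avg_windspeed = float(pd_windspeed['Value'].mean())

    quality = 0  # zero is Good in OPC UA
    write_dataobject_temperature.write(avg_temperature, end_date, quality)
    write_dataobject_windspeed.write(avg_windspeed, end_date, quality)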
Example #4
def mondays_describe(session, epmdataobject, starttime, endtime):
    '''Shows descriptive statistics for every Monday in the period'''

    try:
        queryperiod = epm.QueryPeriod(starttime, endtime)
        processInterval = datetime.timedelta(seconds=300)
        aggregationdetails = epm.AggregateDetails(
            processInterval, epm.AggregateType.Interpolative)
        data = epmdataobject.historyReadAggregate(aggregationdetails,
                                                  queryperiod)

    except Exception:
        raise Exception('Error in read aggregation')

    # EPM returns an ndarray; convert it to a pandas DataFrame
    df = pd.DataFrame({
        'Value': data['Value'].tolist(),
        'Timestamp': data['Timestamp'].tolist()
    })
    # create a new column with the weekday name
    # (pandas >= 1.0 removed dt.weekday_name in favour of dt.day_name())
    df['day_of_week'] = df['Timestamp'].dt.day_name()

    # filter by Monday
    filtered_data = df[df.day_of_week == 'Monday']

    print(filtered_data.describe())
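The weekday handling can also be checked offline. A minimal sketch on synthetic data (the values are made up; pandas >= 1.0 is assumed for dt.day_name()):

import numpy as np
import pandas as pd

# one made-up week of hourly samples
ts = pd.date_range('2021-03-01', periods=7 * 24, freq='h')
df = pd.DataFrame({'Timestamp': ts, 'Value': np.random.rand(len(ts))})

df['day_of_week'] = df['Timestamp'].dt.day_name()
print(df[df.day_of_week == 'Monday'].describe())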
Example #5
def get_history_raw(session, epmdataobject):
    '''Gets one hour of historical data from the EPM tag'''

    endtime = session.timeEvent

    initime = endtime - datetime.timedelta(hours=1)

    queryperiod = epm.QueryPeriod(initime, endtime)

    data = epmdataobject.historyReadRaw(queryperiod)

    print("Initial Time:{} \nEnd Time:{} \nData:{}".format(
        initime, endtime, data))
Example #6
def get_history_interpolative(epmdataobject, starttime, endtime,
                              interval_seconds):

    try:
        queryperiod = epm.QueryPeriod(starttime, endtime)
        processInterval = datetime.timedelta(seconds=interval_seconds)
        aggregationdetails = epm.AggregateDetails(
            processInterval, epm.AggregateType.Interpolative)
        data = epmdataobject.historyReadAggregate(aggregationdetails,
                                                  queryperiod)

    except Exception:
        raise Exception('Error in read aggregation')

    print(data)

    return epr.ScopeResult(True)
Example #7
def predict_variable(session, epmdataobject1, epmdataobject2, starttime,
                     endtime, write_dataobject):
    url = 'http://servicos.cptec.inpe.br/XML/estacao/SBGR/condicoesAtuais.xml'

    from xml.etree import ElementTree
    response = requests.get(url)
    tree = ElementTree.fromstring(response.content)
    wind = tree.find('vento_int').text

    wind = float(wind)
    print('Wind forecast for the next hour: {}'.format(wind))
    try:

        queryperiod = epm.QueryPeriod(starttime, endtime)
        processInterval = datetime.timedelta(seconds=600)
        aggregationdetails = epm.AggregateDetails(processInterval, epm.AggregateType.Interpolative)

        data1 = epmdataobject1.historyReadAggregate(aggregationdetails, queryperiod)
        data2 = epmdataobject2.historyReadAggregate(aggregationdetails, queryperiod)
    except Exception:
        print('Error while querying the dataobjects')
        raise

    df1 = pd.DataFrame(
        {'Value': data1['Value'].tolist()})

    df2 = pd.DataFrame(
        {'Value': data2['Value'].tolist()})

    y = df1.iloc[:, 0:1].values

    X = df2.iloc[:, 0:1].values

    # Fit regression model

    regr_1 = DecisionTreeRegressor(max_depth=5)
    regr_1.fit(X, y)

    # Predict (scikit-learn expects a 2D array, hence the [[wind]])
    y_predicted = regr_1.predict([[wind]])

    print('Power forecast based on the decision tree model: {}'.format(
        float(y_predicted[0])))

    date = datetime.datetime.now()
    value = float(y_predicted[0])
    quality = 0  # zero is Good in OPC UA

    write_dataobject.write(value, date, quality)
Example #8
def weekly_report(session, temperature, windspeed, pathname):
    """
    Write weekly statistics in EPM Server
    """
    endtime = session.timeEvent
    starttime = endtime - datetime.timedelta(weeks=1)

    try:
        queryperiod = epm.QueryPeriod(starttime, endtime)
        processInterval = datetime.timedelta(minutes=15)
        aggregationdetails = epm.AggregateDetails(
            processInterval, epm.AggregateType.Interpolative)
        temperature_data = temperature.historyReadAggregate(
            aggregationdetails, queryperiod)
        windspeed_data = windspeed.historyReadAggregate(
            aggregationdetails, queryperiod)

    except Exception:
        raise Exception('get interpolative data error')
Example #9
def filter_lower_limit(session, epmdataobject, limit, starttime, endtime):
    '''Keeps only the values above the given lower limit'''
    try:
        queryperiod = epm.QueryPeriod(starttime, endtime)
        processInterval = datetime.timedelta(seconds=300)
        aggregationdetails = epm.AggregateDetails(
            processInterval, epm.AggregateType.Interpolative)
        data = epmdataobject.historyReadAggregate(aggregationdetails,
                                                  queryperiod)

    except Exception:
        raise Exception('Error in read aggregation')

    # EPM returns an ndarray; convert it to a pandas DataFrame
    df = pd.DataFrame({
        'Value': data['Value'].tolist(),
        'Timestamp': data['Timestamp'].tolist()
    })

    df = df.reset_index()

    df = df.loc[df['Value'] > limit]
    print(df)
Example #10
except Exception:
    logger.error("cannot find {} in the EPM Server".format(bvname))
    exit(1)

try:
    dataobject2 = connection.getDataObjects('SP01_WindSpeedAvg')

except Exception:
    logger.error("cannot find {} in the EPM Server".format(bvname))
    exit(1)

starttime = datetime.datetime(2014, 3, 1, 1, 00, 00)
endtime = datetime.datetime(2014, 3, 30, 1, 00, 00)

queryperiod = epm.QueryPeriod(starttime, endtime)
processInterval = datetime.timedelta(seconds=600)
aggregationdetails = epm.AggregateDetails(processInterval,
                                          epm.AggregateType.Interpolative)

data1 = dataobject1['SP01_PowerAvg'].historyReadAggregate(
    aggregationdetails, queryperiod)
data2 = dataobject2['SP01_WindSpeedAvg'].historyReadAggregate(
    aggregationdetails, queryperiod)

df1 = pd.DataFrame({'Value': data1['Value'].tolist()})

df2 = pd.DataFrame({'Value': data2['Value'].tolist()})

y = df1.iloc[:, 0:1].values
Example #11
def predict_variable(session, epmdataobject1, epmdataobject2, starttime,
                     endtime, connection, pathname):

    r = requests.get(
        'http://api.openweathermap.org/data/2.5/weather'
        '?q=London&APPID=1c8911a86601fec454d7f103939e5191'
    )
    print(r.json())

    try:

        queryperiod = epm.QueryPeriod(starttime, endtime)
        processInterval = datetime.timedelta(seconds=600)
        aggregationdetails = epm.AggregateDetails(
            processInterval, epm.AggregateType.Interpolative)

        data1 = epmdataobject1.historyReadAggregate(aggregationdetails,
                                                    queryperiod)
        data2 = epmdataobject2.historyReadAggregate(aggregationdetails,
                                                    queryperiod)
    except Exception:
        print('Error while querying the dataobjects')
        raise

    df1 = pd.DataFrame({'Value': data1['Value'].tolist()})

    df2 = pd.DataFrame({'Value': data2['Value'].tolist()})

    y = df1.iloc[:, 0:1].values

    X = df2.iloc[:, 0:1].values

    X_test = np.array([0, 3, 6, 9, 12, 15, 18])
    X_test = X_test.reshape(-1, 1)

    # Fit regression model
    regr_1 = DecisionTreeRegressor(max_depth=2)
    regr_2 = DecisionTreeRegressor(max_depth=5)
    regr_1.fit(X, y)
    regr_2.fit(X, y)

    # Predict

    y_1 = regr_1.predict(X_test)
    print(y_1)
    print(X_test)
    y_2 = regr_2.predict(X_test)

    print(y_2)
    print(X_test)

    # Plot the results
    plt.figure()
    plt.scatter(X, y, color='red', label="data")
    plt.plot(X_test,
             regr_1.predict(X_test),
             color="blue",
             label="max_depth=2",
             linewidth=2)
    plt.plot(X_test,
             regr_2.predict(X_test),
             color="yellow",
             label="max_depth=5",
             linewidth=2)
    plt.xlabel("data")
    plt.ylabel("target")
    plt.title("Decision Tree Regression")
    plt.legend()
    buf = io.BytesIO()
    plt.savefig(buf, format='png')
    buf.seek(0)

    epmConn = getFirstItemFromODict(session.connections)
    epResourceManager = epmConn.getPortalResourcesManager()
    imgFolder = epResourceManager.getResource(pathname)

    resource = imgFolder.upload('predict_Decision_tree_regression.png',
                                buf,
                                'Scatter plot generated by the Processor',
                                mimetypes.types_map['.png'],
                                overrideFile=True)
Example #12
def scatter_plot(session, connection, epmdataobject_1, epmdataobject_2,
                 starttime, endtime, pathname):
    """
    **ScatterPlot**

                Este método gera faz a consulta interpolada de dois Dataobjects e gera um arquivo de imagem, contendo
                gráfico do tipo Scatter, salvo nos resources do Portal.

                            :param session: objeto *session* do EPM Processor
                            :parm connection: connection para os Resources
                            :param epmdataobject_1: dataobject.
                            :param epmdataobject_2: dataobject.
                            :param starttime: datetime de início da consulta.
                            :param endtime: datetime de fim da consulta.
                            :param pathname: Diretório dentro de Resources do Portal
                            :type filename: nome do arquivo a ser salvo.


                            .. note::
                                Esta função serve apenas como validação de caso de uso do sistema.


                            .. warning::
                                Não é necessário informar **Process Interval**.

                                No modo de *TEST* o resultado é apenas impresso na tela.

                                Não é feita distinção entre execuções de : *TEST*, *PRODUCTION* e *SIMULATION*, ou seja
                                o resultado sempre será escrita em em arquivo em caso de sucesso na execução.


                            """

    try:
        queryperiod = epm.QueryPeriod(starttime, endtime)
        processInterval = datetime.timedelta(seconds=300)
        aggregationdetails = epm.AggregateDetails(
            processInterval, epm.AggregateType.Interpolative)
        data1 = epmdataobject_1.historyReadAggregate(aggregationdetails,
                                                     queryperiod)
        data2 = epmdataobject_2.historyReadAggregate(aggregationdetails,
                                                     queryperiod)

    except Exception:
        raise Exception('Error in read aggregation')

    # generate the chart and save it to a buffer
    plt.scatter(data1['Value'], data2['Value'])
    plt.title('Scatter Plot')
    bufBoxplot = io.BytesIO()
    plt.savefig(bufBoxplot, format='png')
    bufBoxplot.seek(0)

    print(session.connections)
    epmConn = getFirstItemFromODict(session.connections)
    epResourceManager = epmConn.getPortalResourcesManager()
    imgFolder = epResourceManager.getResource(pathname)

    resource = imgFolder.upload('scatter.png',
                                bufBoxplot,
                                'Scatter plot generated by the Processor',
                                mimetypes.types_map['.png'],
                                overrideFile=True)