Example #1
0
# NOTE(review): fragment — `connection`, `sql_drop_command` and
# `sql_create_command` are defined earlier in the original script
# (not visible in this chunk).
connection.cursor().execute(sql_drop_command)
connection.commit()

print(sql_create_command)
connection.cursor().execute(sql_create_command)
connection.commit()

# "LOCAL" indicates that the file lives on the client machine!
import os
theFile = os.getcwd() + os.path.sep + "unoeste_historico_de_chuva.csv"

print("\n\nIniciando leitura do arquivo: \n   " + theFile)

from myownapi.AnalyticsARIMA import AnalyticsARIMA
analytics = AnalyticsARIMA()
analytics.read_csv(theFile)

print("\nTratando variáveis!")
# Per its name, this helper replaces null entries in each column with the
# mean of the remaining values — TODO confirm against AnalyticsARIMA.
for columName in list(analytics.getColumnsNames()):
    analytics.tratarVariaveisNulasComMediaDasOutras(columName)

print("\nTransformando dataset para formato correto!")

# Data preparation (see Treino7)
import pandas as pd


def toDate(year, month, day=1):
    """Build a pandas ``Timestamp`` for the given year/month (day defaults to 1).

    Fix: the original formatted the ambiguous string ``"<month>-1-<year>"``
    and let ``pd.to_datetime`` guess the format. With the default
    month-first parsing, an out-of-range month (e.g. ``"13-1-2010"``)
    makes pandas silently fall back to day-first interpretation and
    return Jan 13, 2010 instead of raising. Constructing the Timestamp
    from explicit components is unambiguous and raises on invalid input.

    Args:
        year: calendar year; float-like values (e.g. ``2010.0``) are truncated.
        month: calendar month, 1-12.
        day: day of the month; defaults to 1, the value the original hard-coded.

    Returns:
        ``pandas.Timestamp`` for the requested date.
    """
    return pd.Timestamp(year=int(year), month=int(month), day=int(day))
Example #2
0
# NOTE(review): the original comment said real dataset from "The IWSR",
# but the URL below actually loads the covid19br cases-by-city time series.
from myownapi.AnalyticsARIMA import AnalyticsARIMA

analytics = AnalyticsARIMA()
analytics.read_csv(
    'https://raw.githubusercontent.com/wcota/covid19br/master/cases-brazil-cities-time.csv'
)
analytics.df.head(10)  # NOTE(review): result discarded — only useful in a notebook
analytics.info  # NOTE(review): bare attribute access; result discarded

# Keep only the columns of interest, restricted to São Paulo state.
df = analytics.df.filter(
    ['city', 'state', 'ibgeID', 'deaths', 'totalCases', 'date'])
df = df.loc[df['state'] == "SP"]

# Unique IBGE city identifiers to iterate over.
ibg_ids = df.filter(['ibgeID'])
ibg_ids = ibg_ids.drop_duplicates()

max = len(ibg_ids['ibgeID'])  # NOTE(review): shadows the builtin `max`
contador = 0  # progress counter

import os
# NOTE(review): fragment — the `try` below is truncated in this chunk;
# its matching `except` is not visible here.
for ibge_id in ibg_ids['ibgeID']:
    contador = contador + 1
    try:
        # All rows for this city; first row supplies the display name.
        city_df = df.loc[df['ibgeID'] == ibge_id]
        city_name = city_df['city'].iloc[0]

        print("[" + str(contador) + "/" + str(max) +
              "] Aplicando ARIMA na cidade - " + city_name)

        analytics = AnalyticsARIMA()
# NOTE(review): fragment — this `except` is orphaned here; its matching
# `try` (presumably the database-connection attempt) is not visible in
# this chunk. The bare `except:` also swallows the real error.
except:
    print("Erro ao acessar o banco de dados! Confira seus dados!")
    exit()
print("Conexão estabelecida com sucesso!!\n\n")

sql_select_command = "SELECT * FROM rain_history;"

print("Executando SQL Commands :")
print(sql_select_command)

# Fetch the entire rain_history table into memory.
cursor = connection.cursor()
cursor.execute(sql_select_command)
result = cursor.fetchall()

from myownapi.AnalyticsARIMA import AnalyticsARIMA
analytics = AnalyticsARIMA()

import pandas as pd

# Split the query rows into parallel date/value column arrays.
dateArray = []
valueArray = []

for entry in result:
    dateArray.append(entry[0])   # column 0: date
    valueArray.append(entry[1])  # column 1: rainfall value

dataFrameData = {'Date': dateArray, 'Value': valueArray}

# Rebuild a DataFrame from the query result and hand it to the ARIMA helper.
original_df = pd.DataFrame(data=dataFrameData, columns=['Date', 'Value'])
analytics.setDataframe(original_df)
Example #4
0
# NOTE(review): the original comment said real dataset from "The IWSR",
# but the file loaded below is the covid19br cases-by-city CSV.
from myownapi.AnalyticsARIMA import AnalyticsARIMA

analytics = AnalyticsARIMA()
# NOTE(review): hard-coded absolute Windows path — not portable.
dataset_file = "C:/Users/Petrus/Desktop/UNESP/Docs 2016-2020/2019/Segundo Semestre/TCC2/TCC BigData Analytics/Treino11/cases-brazil-cities-time.csv"

analytics.read_csv(dataset_file)

# Restrict the data to the death counts of the city of São Paulo.
df = analytics.df.filter(['date', 'city', 'deaths'])
ndf = df.loc[df['city'] == "São Paulo/SP"]
analytics.setDataframe(ndf)

# Target column = 'deaths', indexed by the 'date' column.
analytics.arimaDefinirColunaObjetivo(nomeDaColunaObjetivo='deaths',
                                     nomeDaColunaDeDatas='date')
analytics.aplicarARIMA(verbose=True, ARIMA_SASONALIDADE=1)

# Export part: serialize the original series, the in-sample prediction and
# the forecast as JSON fragments.
import json
json_original_all = json.loads(analytics.df.to_json())['deaths']

# In-sample prediction from 2020-04-20 onward, with confidence intervals.
pred = analytics.ARIMAPredictionToPred(forecastStartingDate='2020-04-20')
json_pred_2020_04_20 = pred.predicted_mean.to_json()
json_pred_confidence_2020_04_20 = pred.conf_int().to_json()

# Out-of-sample forecast for the next 20 days.
pred = analytics.ARIMAForecastToPred(steps=20)
json_forecast_20Dias = pred.predicted_mean.to_json()
json_forecast_confidence_20Dias = pred.conf_int().to_json()

# NOTE(review): dict literal truncated in this chunk.
the_output = {
    "original":
    json_original_all,
Example #5
0
# Shampoo sales over 3 years
# Source: BOOK Time Series Data Library (citing: Makridakis, Wheelwright and Hyndman (1998))

# Description: This dataset describes the monthly number of sales of shampoo over a 3-year period.
from myownapi.AnalyticsARIMA import AnalyticsARIMA

analytics = AnalyticsARIMA()
# NOTE(review): hard-coded absolute Windows path — not portable.
dataset_file = "C:/Users/Petrus/Desktop/UNESP/Docs 2016-2020/2019/Segundo Semestre/TCC2/TCC BigData Analytics/Treino11/shampoo.csv"

analytics.read_csv(dataset_file)
# Target column = 'Sales', indexed by the 'Date' column.
analytics.arimaDefinirColunaObjetivo(nomeDaColunaObjetivo='Sales',
                                     nomeDaColunaDeDatas='Date')
analytics.aplicarARIMA()

# Serialize the original series plus prediction/forecast for export.
import json
json_original_all = json.loads(analytics.df.to_json())['Sales']

# In-sample prediction from 2013 onward, with confidence intervals.
pred = analytics.ARIMAPredictionToPred(forecastStartingDate="2013")
json_pred_2013 = pred.predicted_mean.to_json()
json_pred_confidence_2013 = pred.conf_int().to_json()

# 12-step (one year) out-of-sample forecast.
pred = analytics.ARIMAForecastToPred(steps=12)
json_forecast_2014 = pred.predicted_mean.to_json()
json_forecast_confidence_2014 = pred.conf_int().to_json()

# NOTE(review): dict literal truncated in this chunk.
the_output = {
    "original":
    json_original_all,
    "data": [{
        "name": "json_pred_2013",
        "type": "normal",
# Real company dataset, "The IWSR" (whiskey sales).
from myownapi.AnalyticsARIMA import AnalyticsARIMA

analytics = AnalyticsARIMA()
# NOTE(review): hard-coded absolute Windows path — not portable.
dataset_file = "C:/Users/Petrus/Desktop/UNESP/Docs 2016-2020/2019/Segundo Semestre/TCC2/TCC BigData Analytics/Treino11/whiskeysales.csv"

analytics.read_csv(dataset_file)
# Per its name, replaces null 'Cases' entries with the mean of the other
# values — TODO confirm against AnalyticsARIMA.
analytics.tratarVariaveisNulasComMediaDasOutras('Cases')

import datetime


def yearToDate(year):
    """Convert a bare year into a ``datetime`` at midnight on January 1st.

    Passed as ``funcaoDeConversaDeDatas`` when the CSV's date column only
    contains years. Robustness fix: CSV readers commonly deliver year
    columns as floats (e.g. ``2010.0``), which ``datetime.datetime``
    rejects — truncate to a plain ``int`` first. Backward compatible for
    integer input.

    Args:
        year: calendar year, as an int or float-like value.

    Returns:
        ``datetime.datetime`` for January 1st of that year, 00:00:00.
    """
    return datetime.datetime(int(year), 1, 1)


# Target column = 'Cases', date index built from the 'Year' column via
# the yearToDate converter defined above.
analytics.arimaDefinirColunaObjetivo(nomeDaColunaObjetivo='Cases',
                                     nomeDaColunaDeDatas='Year',
                                     funcaoDeConversaDeDatas=yearToDate)
analytics.aplicarARIMA(verbose=True, ARIMA_SASONALIDADE=1)

# Export part: serialize the original series, prediction and forecast.
import json
json_original_all = json.loads(analytics.df.to_json())['Cases']

# In-sample prediction starting 2010-01-01, with confidence intervals.
pred = analytics.ARIMAPredictionToPred(forecastStartingDate='1-1-2010')
json_pred_2010 = pred.predicted_mean.to_json()
json_pred_confidence_2010 = pred.conf_int().to_json()

# 10-step (ten year) out-of-sample forecast.
# NOTE(review): fragment truncated after the next line in this chunk.
pred = analytics.ARIMAForecastToPred(steps=10)
json_forecast_10Years = pred.predicted_mean.to_json()