Example #1
0
def get_act_data(year=None, dpto=None, mpio=None, ipcc=None):
    """
    Query bovine IPCC activity data joined with its lookup tables.

    :param year: optional list of years to filter on (column ``ano``)
    :param dpto: optional list of department codes (column ``cod_depto``)
    :param mpio: optional list of municipality codes (column ``cod_muni``)
    :param ipcc: optional list of IPCC animal-type ids
        (column ``id_ani_tipo_ipcc``)
    :return: DataFrame of activity rows; ``cod_muni`` is zero-padded to
        the 5-digit DANE municipality code format
    """

    def _sql_list(values):
        # Render a Python list as a SQL tuple, e.g. [1, 2] -> "(1, 2)".
        # NOTE(review): values are interpolated directly into the SQL text,
        # so untrusted input would allow SQL injection -- consider switching
        # to parameterized queries (DB-API placeholders).
        return str(values).replace('[', '(').replace(']', ')')

    query = """ SELECT cod_depto, cod_muni, id_reg_ganadera, ano, id_ani_tipo_ipcc, numero, cod_pais,
                D.nombre as depto, M.nombre as muni,
                P.nombre as pais, R.nom_region as region, T.tipo as tipo
                FROM datos_act_bovinos_ipcc as A
                INNER JOIN pais             as P ON P.codigo = A.cod_pais
                INNER JOIN departamento     as D ON D.codigo = A.cod_depto
                INNER JOIN animal_tipo      as T ON T.id_animal_tipo = A.id_ani_tipo_ipcc
                INNER JOIN region_ganadera  as R ON R.id_region = A.id_reg_ganadera
                INNER JOIN municipio        as M ON M.codigo = A.cod_muni
    """
    conditions = []
    if year:
        conditions.append('ano in {0}'.format(_sql_list(year)))
    if dpto:
        # Bug fix: the original filtered on "codigo_depto", which does not
        # match the "cod_depto" column selected and joined above.
        conditions.append('cod_depto in {0}'.format(_sql_list(dpto)))
    if mpio:
        conditions.append('cod_muni in {0}'.format(_sql_list(mpio)))
    if ipcc:
        conditions.append('id_ani_tipo_ipcc in {0}'.format(_sql_list(ipcc)))
    # Bug fix: the original emitted a bare "AND ..." clause (no "WHERE")
    # whenever any filter other than `year` was used on its own, which
    # produced invalid SQL. Build the WHERE clause from whichever filters
    # are present instead.
    if conditions:
        query += 'WHERE ' + '\n                AND '.join(conditions) + '\n'
    df_actividad = pd.read_sql_query(query, con=pg_connection_str())
    # Municipality codes are numeric in the DB; left-pad to five digits
    # (e.g. 5001 -> "05001") so they match GIS/DANE code strings.
    df_actividad.cod_muni = df_actividad.cod_muni.map('{:05}'.format)
    return df_actividad
Example #2
0
def get_act_data():
    """
    Fetch projected activity records together with their emission factors.

    Bovine records are joined to region/animal-type factors from
    ``fe_fermentacion_temporal``; all other species are joined to factors
    averaged per animal type from ``fe_otras_especies``. Both result sets
    are stacked and ordered by ``id``.

    :return: combined DataFrame sorted by ``id``
    """
    bovine_sql = """ SELECT DA.id, ano as año, id_reg_ganadera, id_ani_tipo_ipcc, numero,
                FE.fe_fermentacion_ent as fe, FE.fe_gestion_est as gen
                FROM datos_act_bovinos_ipcc_proyectados as DA
                INNER JOIN fe_fermentacion_temporal as FE ON FE.id_reg = DA.id_reg_ganadera AND 
                FE.id_at = DA.id_ani_tipo_ipcc
            """
    bovine_df = pd.read_sql_query(bovine_sql, con=pg_connection_str())
    others_sql = "SELECT DA.id, DA.ano as año, DA.id_reg_ganadera, DA.id_ani_tipo_ipcc, DA.numero," \
                 "V.fe, V.gen FROM datos_act_bovinos_ipcc_proyectados as DA " \
                 "INNER JOIN (select id_animal_tipo_ipcc,  AVG(fe_ch4_fermentacin_enterica) as fe, " \
                 "avg(fe_ch4_gestion_estiercol) as gen from fe_otras_especies group by (id_animal_tipo_ipcc)) as V " \
                 "on V.id_animal_tipo_ipcc = DA.id_ani_tipo_ipcc "
    others_df = pd.read_sql_query(others_sql, con=pg_connection_str())
    # Stack bovines and other species, then order the rows by their id.
    combined = pd.concat([bovine_df, others_df], ignore_index=True)
    return combined.sort_values(by=['id'])
Example #3
0
def get_act_data():
    """
    Read temporal bovine activity data joined with its emission factors.

    Joins ``datos_act_bovinos_ipcc_temporal`` to
    ``fe_fermentacion_temporal`` on region and IPCC animal type.

    :return: DataFrame with activity numbers, recorded emissions and the
        matching fermentation/manure-management factors
    """
    sql = """ SELECT DA.id, ano as año, cod_muni, id_reg_ganadera, id_ani_tipo_ipcc, numero, emision_fe, emision_ge,
                FE.fe_fermentacion_ent as fe, FE.fe_gestion_est as gen
                FROM datos_act_bovinos_ipcc_temporal as DA
                INNER JOIN fe_fermentacion_temporal as FE ON FE.id_reg = DA.id_reg_ganadera AND 
                FE.id_at = DA.id_ani_tipo_ipcc
            """
    return pd.read_sql_query(sql, con=pg_connection_str())
Example #4
0
def upload_data(df, tb_name):
    """
    Write a DataFrame to a Postgres table via ``DataFrame.to_sql``.

    https://stackoverflow.com/questions/23103962/how-to-write-dataframe-to-postgres-table
    :param df: data frame to upload
    :param tb_name: table name
    :return: None
    """
    # Default to_sql semantics apply: fails if the table already exists
    # and writes the DataFrame index as a column.
    df.to_sql(tb_name, pg_connection_str())
    print("Done!")
Example #5
0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pandas as pd
import json
import plotly.express as px
import plotly.graph_objects as go
from src.database.db_utils import pg_connection_str

# --- Module-level data load: all of this runs at import time. ---
# NOTE(review): hard-coded absolute local paths -- confirm they exist on the
# deployment host, or move them to configuration.
df_act = pd.read_csv('/home/alfonso/Documents/afolu/results/act.csv')
df_gis = pd.read_csv('/home/alfonso/Documents/afolu/results/muni.csv')
# Animal-type lookup table read straight from Postgres.
df_at = pd.read_sql_table('animal_tipo', con=pg_connection_str())
# Department codes as strings so they compare against the string codes below.
df_gis['DPTO_CCDGO'] = df_gis['DPTO_CCDGO'].astype(str)
# Label/value option lists -- presumably dropdown options for a Plotly/Dash
# UI defined elsewhere; TODO confirm against the layout code.
dt_year = [{'label': f'{i}', 'value': f'{i}'} for i in df_act.ano.unique()]
# Department options: label is the department name looked up in the GIS
# frame by its code; value is the code itself.
dt_dptos = [{
    'label': f'{df_gis.DPTO_CNMBR[df_gis.DPTO_CCDGO.isin([i])].values[0]}',
    'value': f'{i}'
} for i in df_act.cod_depto.unique().astype(str)]
# Municipality options, same pattern keyed on the concatenated muni code.
dt_mpios = [{
    'label': f'{df_gis.MPIO_CNMBR[df_gis.MPIO_CCNCT.isin([i])].values[0]}',
    'value': f'{i}'
} for i in df_act.cod_muni.unique().astype(str)]
# Animal-type options from the lookup table.
dt_at = [{
    'label': f'{df_at.tipo[df_at.id_animal_tipo.isin([i])].values[0]}',
    'value': f'{i}'
} for i in df_at.id_animal_tipo]
# Municipality geometries (GeoJSON), e.g. for a choropleth map.
with open('/home/alfonso/Documents/gis/Muni/mcpios.geojson') as json_file:
    mun = json.load(json_file)


def get_act_data(year=None, dpto=None, mpio=None, ipcc=None):
    year_q = ''
Example #6
0
def deforestacion(id_bioma=None, year=None, id_type=1, id_report=1):
    """
    Compute deforestation emission results per biome, year and IPCC
    subcategory and persist them to the ``deforestacion_resultado`` table.

    :param id_bioma: optional biome id filter forwarded to ``get_query_da``
    :param year: optional year filter forwarded to ``get_query_da``
    :param id_type: metric family selector: 1 = activity data,
        2 = natural-forest biomass, 3 = MOM (dead organic matter)
    :param id_report: concrete metric within the family (historic vs.
        reference-level, gross vs. net) -- see the if/elif chain below
    :return: None; the selected result table is written to the database
    :raises ValueError: if the activity-data query returns no rows
    """
    df = pd.read_sql_query(get_query_da(id_bioma=id_bioma, year=year),
                           con=pg_connection_str())
    if df.empty:
        raise ValueError('Esta consulta no tiene datos')
    # Free the 'id' name: the activity id is kept as 'idx'.
    df.rename(columns=dict([('id', 'idx')]), inplace=True)
    # IPCC subcategory lookup; 'id_datos_actividad' holds the wide-column
    # name used in the activity table, so rename it to 'nombre' to merge on.
    df_subcat = pd.read_sql('deforestacion_subcategorias_ipcc',
                            con=pg_connection_str())
    df_subcat.rename(columns=dict([('nombre', 'nombre_id'),
                                   ('id_datos_actividad', 'nombre')]),
                     inplace=True)
    df_fact_cab = pd.read_sql('deforestacion_factores',
                              con=pg_connection_str())
    # Wide -> long: every subcategory column (everything after the first
    # four id columns) becomes a row with its percentage of cover change.
    df = pd.melt(df.reset_index(),
                 id_vars=['id_bioma', 'ano', 'ha_his', 'ha_pro'],
                 var_name='nombre',
                 value_name='porc_cobert_cambio',
                 value_vars=df.columns[4:]).reset_index()
    df = pd.merge(df,
                  df_subcat[['nombre', 'id_subcat_ipcc',
                             'subcat_ipcc_number']],
                  on=['nombre'],
                  how='left').drop(['nombre'], axis=1)
    df = df[[
        'ano', 'id_bioma', 'subcat_ipcc_number', 'id_subcat_ipcc', 'ha_his',
        'ha_pro', 'porc_cobert_cambio'
    ]]
    df = df.sort_values(by=['id_bioma', 'id_subcat_ipcc'])
    # Missing projected hectares fall back to the historic value.
    df['ha_pro'] = df['ha_pro'].fillna(df['ha_his'])
    # Inner join on the biome/region: CO2-equivalent content of the natural
    # forest (tCO2). NOTE: 'mom_c02' is the column name as stored in the DB.
    df = df.merge(df_fact_cab[['id_bioma', 'biomasa_total_co2', 'mom_c02']],
                  on=['id_bioma'],
                  how='left')

    # Inner join on the IPCC subcategory: CO2-equivalent content of the new
    # cover (tCO2). This second merge produces _x/_y suffixed columns.
    df = df.merge(
        df_fact_cab[['id_subcat_ipcc', 'biomasa_total_co2', 'mom_c02']],
        on=['id_subcat_ipcc'],
        how='left')

    # Hectares attributed to each subcategory (historic and projected).
    df['tipif_ha_hist'] = df['ha_his'] * df['porc_cobert_cambio'] / 100
    df['tipif_ha_proy'] = df['ha_pro'] * df['porc_cobert_cambio'] / 100
    # Disambiguate the merge suffixes: _x = natural forest, _y = new cover.
    df.rename(columns=dict([('biomasa_total_co2_x', 'bt_co2_equiv_bos_nat'),
                            ('biomasa_total_co2_y', 'bt_co2_equiv_nuev_cob'),
                            ('mom_c02_x', 'mom_co2_equiv_bos_nat'),
                            ('mom_c02_y', 'mom_co2_equiv_nuev_cob')]),
              inplace=True)

    # Gross and net emissions from natural-forest deforestation - historic
    df['em_bruta_bnat_hist'] = df['tipif_ha_hist'] * df['bt_co2_equiv_bos_nat']
    df['cont_rema_bnat_hist'] = df['tipif_ha_hist'] * df[
        'bt_co2_equiv_nuev_cob']
    df['emi_neta_bnat_hist'] = df['cont_rema_bnat_hist'] - df[
        'em_bruta_bnat_hist']

    # Gross and net emissions from MOM deforestation - historic
    df['em_bruta_mom_hist'] = df['tipif_ha_hist'] * df['mom_co2_equiv_bos_nat']
    df['cont_rema_mom_hist'] = df['tipif_ha_hist'] * df[
        'mom_co2_equiv_nuev_cob']
    df['emi_neta_mom_hist'] = df['cont_rema_mom_hist'] - df['em_bruta_mom_hist']

    # Gross and net emissions from natural-forest deforestation - projected
    df['em_bruta_bnat_nref'] = df['tipif_ha_proy'] * df['bt_co2_equiv_bos_nat']
    df['cont_rema_bnat_nref'] = df['tipif_ha_proy'] * df[
        'bt_co2_equiv_nuev_cob']
    df['emi_neta_bnat_nref'] = df['cont_rema_bnat_nref'] - df[
        'em_bruta_bnat_nref']

    # Gross and net emissions from MOM deforestation - projected
    df['em_bruta_mom_nref'] = df['tipif_ha_proy'] * df['mom_co2_equiv_bos_nat']
    df['cont_rema_mom_nref'] = df['tipif_ha_proy'] * df[
        'mom_co2_equiv_nuev_cob']
    df['emi_neta_mom_nref'] = df['cont_rema_mom_nref'] - df['em_bruta_mom_nref']

    # Aggregate all metrics per biome / year / IPCC subcategory number.
    df_res = df.groupby(
        by=['id_bioma', 'ano', 'subcat_ipcc_number'], as_index=False)[[
            'tipif_ha_hist', 'tipif_ha_proy', 'em_bruta_bnat_hist',
            'cont_rema_bnat_hist', 'emi_neta_bnat_hist', 'em_bruta_mom_hist',
            'cont_rema_mom_hist', 'emi_neta_mom_hist', 'em_bruta_bnat_nref',
            'cont_rema_bnat_nref', 'emi_neta_bnat_nref', 'em_bruta_mom_nref',
            'cont_rema_mom_nref', 'emi_neta_mom_nref'
        ]].sum().reset_index()

    df_res = df_res.sort_values(by=['id_bioma', 'subcat_ipcc_number', 'ano'])
    # Attach the human-readable subcategory name; the stray 'index' column
    # introduced by the earlier reset_index() calls is dropped here.
    df_res = pd.merge(df_res,
                      df_subcat[['subcat_ipcc_number',
                                 'subcat_ipcc']].drop_duplicates(),
                      on=['subcat_ipcc_number'],
                      how='left').drop(['subcat_ipcc_number', 'index'], axis=1)
    # Long -> wide: one column per subcategory, per metric.
    df_res = pd.pivot(df_res, index=['id_bioma', 'ano'], columns='subcat_ipcc')

    # Pick the single metric requested by (id_type, id_report).
    if (id_type == 1) and (id_report == 1):  # Filter by activity data
        df_res = df_res['tipif_ha_hist'].reset_index()
    elif (id_type == 1) and (id_report == 2):
        df_res = df_res['tipif_ha_proy'].reset_index()
    elif (id_type == 2) and (id_report == 3):  # Filter by biomass data
        df_res = df_res['emi_neta_bnat_hist'].reset_index()
    elif (id_type == 2) and (id_report == 4):
        df_res = df_res['em_bruta_bnat_hist'].reset_index()
    elif (id_type == 2) and (id_report == 5):
        df_res = df_res['emi_neta_bnat_nref'].reset_index()
    elif (id_type == 2) and (id_report == 6):
        df_res = df_res['em_bruta_bnat_nref'].reset_index()
    elif (id_type == 3) and (id_report == 3):  # Filter by MOM data
        df_res = df_res['emi_neta_mom_hist'].reset_index()
    elif (id_type == 3) and (id_report == 4):
        df_res = df_res['em_bruta_mom_hist'].reset_index()
    elif (id_type == 3) and (id_report == 5):
        df_res = df_res['emi_neta_mom_nref'].reset_index()
    elif (id_type == 3) and (id_report == 6):
        df_res = df_res['em_bruta_mom_nref'].reset_index()
    else:  # Other filters still to be implemented; default to activity data
        df_res = df_res['tipif_ha_hist'].reset_index()
        print(
            'Estos filtros aun no han sido aplicados, datos desplegados son de Actividad'
        )

    # Replace the biome id with its name and move it to the first column.
    df_biomas = pd.read_sql('deforestacion_biomas', con=pg_connection_str())
    df_biomas.rename(columns=dict([('id', 'id_bioma')]), inplace=True)
    df_res = pd.merge(df_res,
                      df_biomas[['nombre', 'id_bioma']],
                      on=['id_bioma'],
                      how='left').drop(['id_bioma'], axis=1)
    df_res.rename(columns=dict([('nombre', 'bioma')]), inplace=True)
    cols = list(df_res.columns)
    cols = [cols[-1]] + cols[:-1]
    df_res = df_res[cols]
    # Persist, replacing any previous result table.
    df_res.to_sql('deforestacion_resultado',
                  con=pg_connection_str(),
                  if_exists='replace',
                  index=False,
                  method="multi",
                  chunksize=5000)
    print('Done!')
Example #7
0
def get_data() -> pd.DataFrame:
    """
    Return every row of the ``fe_fermentacion`` table.

    :return: DataFrame with the full table contents
    """
    sql = "SELECT * " \
          "FROM fe_fermentacion"
    return pd.read_sql_query(sql, pg_connection_str())