コード例 #1
0
ファイル: reuters2blp.py プロジェクト: royopa/test
def renameDIs():
    """Rename every 'OD*' series in assetTS2 to 'DI1*'.

    Reads each matching series, rewrites it under the new name, then
    removes the old entry from the library.
    """
    from mDataStore.globalMongo import mds

    for entry in mds.find(library=mds.assetTS2):
        meta = entry[2]
        if not meta.name.startswith('OD'):
            continue
        frame = mds.read(meta.name, meta.freq, library=mds.assetTS2)
        frame.md.name = frame.md.name.replace('OD', 'DI1')
        mds.write(frame, library=mds.assetTS2)
        mds.delete(meta.name, meta.freq, library=mds.assetTS2)
コード例 #2
0
ファイル: reuters2blp.py プロジェクト: royopa/test
def cnv2minuteAll(lst=None, library=mds.assetTS):
    """Convert every 1-second series in *library* to 1-minute bars.

    :param lst: optional whitelist of series names; when given, only
        names contained in it are converted.
    :param library: mongo library to scan and convert (default assetTS).
    """
    ms = mds.find(library=library)

    for m in ms:
        meta = m[2]
        if meta.freq != '1Second':
            continue
        print('Converting {}'.format(meta.name))
        # Honor the whitelist when one is supplied.
        if lst is not None and meta.name not in lst:
            continue
        # BUG FIX: the original always converted into mds.assetTS,
        # silently ignoring the *library* argument.
        cnv2minute1(meta.name, library=library)
コード例 #3
0
ファイル: reuters2blp.py プロジェクト: royopa/test
def cnvAllReutersTickers(library=mds.assetTS):
    """Map Reuters 1-second series names to their Bloomberg equivalents.

    :param library: mongo library to scan for 1-second series.
    :return: tuple ``(nms, nmsBLP)`` — Reuters names and the
        corresponding Bloomberg-style names, index-aligned.
    """
    # BUG FIX: scan the requested *library* (the original hard-coded
    # mds.assetTS and ignored the argument); also dropped an unused
    # xlwings import.
    ms = mds.find(library=library)
    nms = [m[2].name for m in ms if m[2].freq == '1Second']

    # Reuters -> Bloomberg token substitutions (applied in dict order;
    # later replacements see the result of earlier ones).
    d = {'.SA': '', 'DIJ': 'DI1', 'IND': 'BZ', 'c1': '1', 'c2': '2', 'DOL': 'UC'}

    def replace1(s1, d):
        # Apply every substitution in sequence.
        for k, v in d.items():
            s1 = s1.replace(k, v)
        return s1

    nmsBLP = [replace1(nm, d) for nm in nms]

    return nms, nmsBLP
コード例 #4
0
ファイル: reuters2blp.py プロジェクト: royopa/test
def cp2onlineVS():
    """Copy every 1-business-day series from assetVS into onlineVS.

    Any existing destination series is deleted first (best effort) so
    the subsequent write does not conflict with stale entries.
    """
    from mDataStore.globalMongo import mds

    ms = mds.find(library=mds.assetVS)

    for i, m in enumerate(ms):
        nm = m[2].name
        print('Copying {} - {}/{}'.format(nm, i, len(ms)))
        if m[2].freq != '1BDay':
            continue
        df = mds.read(nm, '1BDay', mds.assetVS,
                      date_range=[dt(1990, 1, 1), dt(2035, 1, 1)])
        try:
            # Best-effort delete: the series may not yet exist in
            # onlineVS. BUG FIX: narrowed the bare `except:` (which also
            # swallowed KeyboardInterrupt/SystemExit) to Exception.
            mds.delete(nm, '1BDay', library=mds.onlineVS)
        except Exception:
            pass

        mds.write(df, library=mds.onlineVS, check_metadata=False)
コード例 #5
0
def classifyTypesIntraday(library='assetTS'):
    """Tag intraday series in the given library with type/subtype.

    The Reuters import routine leaves these metadata fields unset, so
    they are inferred here from the series name and written back.

    :param library: attribute name of the mds library to process.
    :return: None
    """
    lib = getattr(mds, library)
    for entry in mds.find(library=lib):
        meta = entry[2]
        if '.SA' in meta.name:
            meta.type, meta.subtype = 'equity', 'equity_nadj'
        elif ('c1' in meta.name) or ('c2' in meta.name):
            meta.type, meta.subtype = 'future', 'fut_nrol'

        mds.write_metadata(meta, library=lib)
コード例 #6
0
ファイル: credito_ampliado.py プロジェクト: royopa/test
# Take the first sheet of the loaded workbook, then the first table under
# 'Empresas e famílias'; drop its 7th column.
df = df[list(df.keys())[0]]['Empresas e famílias'][0]
df = df.drop(columns=df.columns[6])
# Deflate to real values, then 12-month difference (year-over-year change),
# dropping rows that become entirely NaN.
df = eu.deflate_df(df)
df = df.diff(12).dropna(axis=0, how='all')

# IBC-Br activity index from the MCM spreadsheet.
ibc = pd.read_excel(
    r'F:\DADOS\ASSET\MACROECONOMIA\DADOS\Base de Dados\MCM\ATVIBC1.xls',
    sheet_name='IBC-Br')
# Keep rows from the 3rd on (skip header rows) and the first two columns.
ibc = ibc.iloc[2:, 0:2]
ibc = ibc.set_index(keys=ibc.columns[0])
# Drop the last 6 rows — presumably preliminary observations; TODO confirm.
ibc = ibc.iloc[:-6, :]
ibc = ibc.diff(12).dropna(axis=0, how='all')
# Align the IBC series to start where the credit data starts.
ibc = ibc.loc[df.index[0]:, ]
ibc = ibc.rename(columns={'Unnamed: 1': 'ibc_br'})

# Metadata for credit-indicator and credit-stock series.
ind_cred_list = mds.find(library=mds.econVS, **{'table': 'ind_credito'})
saldo_cred_list = mds.find(library=mds.econVS, **{'table': 'estoque_credito'})

# Keep only real (deflated), non-seasonally-adjusted series.
data_list = []
for table in [ind_cred_list, saldo_cred_list]:
    for i_dic in table:
        if i_dic['real'] == 'yes' and i_dic['seasonality'] == 'nsa':
            data_list.append(i_dic)

credito, _ = mongo_load(m_list=data_list)

# 12-month difference of the credit series, matching the treatment of df.
credito = credito.diff(12).dropna(axis=0, how='all')

# Inner join keeps only dates present in both data sets.
df_general = pd.concat([df, credito], axis=1, join='inner')

for series in df_general:
コード例 #7
0
def load_db():
    """Assemble the regression dataset for the trade-balance model.

    Loads exogenous series from Excel, commodity prices from Bloomberg
    and activity data (PMC/PIM) from mongo, aligns everything to the
    first day of the month, converts to percentage changes, and builds
    lagged copies of every explanatory column (6 to 18 months, in
    3-month steps, on top of the base 3-month lag).

    :return: tuple ``(X, x_exoil, x_oil, m_exoil, m_oil)`` where X is
        the explanatory-variable DataFrame and the other four are the
        target series (exports/imports, ex-oil and oil).
    """

    def _to_month_start(frame):
        # Re-index *frame* so every timestamp falls on day 1 of its month.
        firsts = pd.Series([ts.replace(day=1) for ts in frame.index],
                           index=frame.index)
        out = pd.concat([frame, firsts], axis=1)
        return out.set_index(out.columns[-1])

    def _suffix_3(frame):
        # Rename every column 'name' -> 'name_3' (base 3-month-lag marker).
        return frame.rename(columns={c: str(c) + '_3' for c in frame.columns})

    # --- Excel data -------------------------------------------------
    df_excel = pd.read_excel(
        r'F:\DADOS\ASSET\MACROECONOMIA\DADOS\Setor_Externo\Estudos\MODELO_BALANCA.xlsm',
        sheet_name='dados',
        index_col=0)
    df_excel = df_excel.pct_change(1, fill_method=None)
    df_excel = _to_month_start(df_excel)

    # Target series: exports/imports, ex-oil and oil.
    x_exoil = df_excel.loc[:, 'x_exoil'].copy().dropna()
    x_oil = df_excel.loc[:, 'x_oil'].copy().dropna()
    m_exoil = df_excel.loc[:, 'm_exoil'].copy().dropna()
    m_oil = df_excel.loc[:, 'm_oil'].copy().dropna()

    # Explanatory Excel columns (from the 5th on), lagged 3 months.
    df_l = _suffix_3(df_excel.iloc[:, 4:].shift(3))
    df_l = _to_month_start(df_l)

    # --- Bloomberg data (commodities) -------------------------------
    blp1 = rBLP()
    bbg = blp1.getHistoricData([
        'CO1 Comdty', 'S 1 COMB Comdty', 'LC1 COMB Comdty', 'XAU BGN Curncy',
        'KC1 Comdty', 'SB1 Comdty', 'CT1 Comdty', 'CNY REGN Curncy'
    ], ['PX_LAST'],
                               startDate=dt(2000, 1, 1),
                               endDate=dt(2100, 1, 1))
    bbg_names = [
        'petroleo', 'soja', 'boi', 'ouro', 'cafe', 'acucar', 'algodao', 'cny'
    ]
    # One monthly moving-average series per commodity, keyed by name.
    bbg_month = {
        name: pd.Series(
            eu.month_mma(frame.rename(columns={'PX_LAST': name})).iloc[:, 0])
        for name, frame in zip(bbg_names, bbg)
    }
    bbg_month = pd.DataFrame(bbg_month)
    bbg_month = _suffix_3(bbg_month.pct_change(1).shift(3))

    # --- Activity data (PMC and PIM) --------------------------------
    pmc_list = mds.find(library=mds.econVS, **{'table': 'pmc'})
    pim_list = mds.find(library=mds.econVS, **{'table': 'pim'})

    # Keep only real, non-seasonally-adjusted series.
    m_list = [
        i for i in pmc_list + pim_list
        if i['real'] == 'yes' and i['seasonality'] == 'nsa'
    ]

    activ_df, _ = mongo_load(m_list=m_list)
    activ_df = _suffix_3(activ_df.pct_change(1).shift(3))
    activ_df = activ_df.loc['2002-05-01':, :]
    activ_df = activ_df.dropna(how='any', axis=1)

    # --- Combine and build extra lags -------------------------------
    X = pd.concat([activ_df, df_l, bbg_month], axis=1)

    # For each base column (already lagged 3m) add lags of 6..18 months.
    # PERF: the original concatenated onto X once per column (quadratic);
    # here all lag series are collected and concatenated in a single
    # call, producing the same columns in the same order.
    lagged = []
    for col in X.columns:
        base = col[:-2]  # strip the '_3' suffix
        for extra in (3, 6, 9, 12, 15):
            s = X[col].shift(extra)
            s.name = base + '_' + str(3 + extra)
            lagged.append(s)
    X = pd.concat([X] + lagged, axis=1)

    return X, x_exoil, x_oil, m_exoil, m_oil
コード例 #8
0
ファイル: S_clean_db.py プロジェクト: royopa/test
    def get_db(self,
               transform='transform_y',
               check_real=True,
               check_seas=True,
               min_y_sample=0.8,
               mongo_list=None):
        """Load explanatory variables compatible with the target series.

        Scans econVS for series whose metadata is compatible with the
        target (``self.y_md``) — matching seasonality, comparable
        real/nominal treatment for same-currency series, and a history
        long enough to cover the target sample — then downloads them
        into ``self.x_df`` / ``self.x_rel_date``.

        :param transform: 'transform_y' copies the y transformation to x.
        :param check_real: require a matching real/nominal flag when the
            currency also matches.
        :param check_seas: require a matching seasonality flag.
        :param min_y_sample: minimum fraction of the y sample a
            candidate series must cover.
        :param mongo_list: explicit metadata list; when given, the
            search is skipped and exactly these series are loaded.
        :raises ValueError: if the target frequency is neither monthly
            nor quarterly.
        """
        md = self.y_md

        if transform == 'transform_y':
            self.x_transformation = self.y_transformation

        if mongo_list is not None:
            # Explicit list supplied: load it directly, no filtering.
            data_df, rel_date_df = mongo_load(m_list=mongo_list,
                                              transform=self.x_transformation,
                                              freq=md['freq_'])
            self.x_df, self.x_rel_date = data_df, rel_date_df
            return

        # Minimum number of observations a candidate must provide,
        # expressed in the target's own frequency.
        if md['freq_'] == f.monthBegin:
            min_sample = round(
                ((self.y_endDT.year - self.y_stDT.year) * 12 +
                 self.y_endDT.month - self.y_stDT.month) * min_y_sample)
        elif md['freq_'] == f.quarterBegin:
            min_sample = round(
                ((self.y_endDT.year - self.y_stDT.year) * 4 +
                 self.y_endDT.quarter - self.y_stDT.quarter) * min_y_sample)
        else:
            # BUG FIX: the original fell through with min_sample unbound,
            # raising a confusing NameError for any other frequency.
            raise ValueError('Unsupported frequency: {}'.format(md['freq_']))

        # Latest acceptable start date for a candidate series.
        min_stDT = self.y_data.index[len(self.y_data) - min_sample]

        # Filter the full econVS catalog. The original used a redundant
        # `include` flag: every rejecting branch already `continue`d, so
        # the flag was always True when tested — removed.
        m_list = mds.find(library=mds.econVS)
        series_ref = []

        for series_md in m_list:
            # Same-currency series must match the real/nominal flag.
            if (check_real and series_md['currency'] == md['currency']
                    and series_md['real'] != md['real']):
                continue

            if check_seas and series_md['seasonality'] != md['seasonality']:
                continue

            # Must start early enough to cover the minimum sample.
            if series_md['stDT_econVS'] > min_stDT:
                continue

            # Must extend to at least one day before the target's end.
            if series_md['endDT_econVS'] - self.y_endDT < timedelta(days=-1):
                continue

            series_ref.append(series_md)

        # Download the selected series from mongo.
        data_df, rel_date_df = mongo_load(m_list=series_ref,
                                          transform=self.x_transformation,
                                          freq=md['freq_'])

        self.x_df, self.x_rel_date = data_df, rel_date_df
        self.x_mongo_list = series_ref