def main():
    """Rebuild the weekly ("w") and monthly ("m") indicator tables.

    Reads the latest rows from the six source tables, inner-merges each
    frequency group, and writes the results back via ``refresh``.
    """
    weekly_table = sf.Table("w", "indicator")
    monthly_table = sf.Table("m", "indicator")

    engines = cfg.load_engine()
    # Both connections come from the same configured engine.
    engine_read = engines["2Gb"]
    engine_write = engines["2Gb"]
    conn_read = engine_read.connect()
    conn_write = engine_write.connect()

    # Source tables: return / risk / sub, weekly then monthly.
    source_tables = [
        _tb["re_w"], _tb["ri_w"], _tb["sub_w"],
        _tb["re_m"], _tb["ri_m"], _tb["sub_m"],
    ]

    sqls = gen_sql_indicator_latest(source_tables)
    dfs = fetch_data(sqls, conn_read)

    merged_weekly = merge_result(dfs["w"], how="inner")
    merged_monthly = merge_result(dfs["m"], how="inner")

    refresh(merged_weekly, weekly_table, conn_write)
    refresh(merged_monthly, monthly_table, conn_write)
import calendar as cld
import datetime as dt
import time

from dateutil.relativedelta import relativedelta
import numpy as np
import pandas as pd

from utils.algorithm import fund_indicator as fi, timeutils as tu
from utils.database import sqlfactory as sf, config as cfg, io
import utils.script.scriptutils as su

# Target: monthly organization-research table.
table = sf.Table("m", "oresearch")
columns = [
    "org_id", "org_name", "benchmark", "index_method", "index_range",
    "funds_number", "type_code", "type_name", "stype_code", "stype_name",
    "statistic_date",
]
columns.extend(table.columns())

engines = cfg.load_engine()
engine_read = engines["2Gb"]
engine_wt = engines["2Gb"]
# Statistics are computed for yesterday.
process_date = dt.date.today() - dt.timedelta(1)


def calculate():
    df_whole = pd.DataFrame()
    conn = engine_read.connect()
    year = process_date.year
    month = process_date.month
    month_range = cld.monthrange(year, month)[1]
import calendar as cld
import datetime as dt

from dateutil.relativedelta import relativedelta
import numpy as np
import pandas as pd

from utils.database import sqlfactory as sf, config as cfg, io
from utils.script import scriptutils as su
from utils.algorithm import fund_indicator as fi, timeutils as tu
from utils.script.scriptutils import tic

table = sf.Table("m", "sub")  # w -> m
columns = ["fund_id", "statistic_date", "benchmark"]
columns.extend(table.columns())

engines = cfg.load_engine()
engine_rd = engines["2Gb"]
engine_wt = engines["2Gb"]

df_whole = pd.DataFrame()
# Backfill window: February 2017 only.
for year in range(2017, 2018):
    for month in range(2, 3):
        conn = engine_rd.connect()
        now = dt.datetime.now()
        month_range = cld.monthrange(year, month)[1]
        # Normalize (year, month, month_range) through sf.Time on month-end.
        time_to_fill = sf.Time(dt.datetime(year, month, month_range))
        year, month = time_to_fill.year, time_to_fill.month
        month_range = time_to_fill.month_range
import calendar as cld
import datetime as dt
import time
import multiprocessing

from dateutil import relativedelta
import numpy as np
import pandas as pd

from utils.algorithm import timeutils as tu
from utils.database import sqlfactory as sf, config as cfg, io
from utils.script import scriptutils as su

engines = cfg.load_engine()
engine_rd = engines["2Gb"]
table = sf.Table("m", "index")
# Statistics are computed for yesterday.
process_date = dt.date.today() - dt.timedelta(1)


def calculate(idx):
    dfs = pd.DataFrame()
    PEIndex = sf.PEIndex(idx)
    first_year = PEIndex.firstyear
    result_r = {}
    components_num = {}
    for year in range(first_year, process_date.year + 1):
        # Current year stops at the processing month; past years run full.
        if year == process_date.year:
            month = process_date.month
        else:
            month = 12
import calendar as cld
import datetime as dt

import numpy as np
import pandas as pd
from dateutil.relativedelta import relativedelta

from utils.database import sqlfactory as sf, config as cfg, io
import utils.script.scriptutils as su
from utils.algorithm import fund_indicator as fi, timeutils as tu
from utils.script.scriptutils import tic

table = sf.Table("w", "risk")
columns = ["fund_id", "statistic_date", "benchmark"]
columns.extend(table.columns())

engines = cfg.load_engine()
engine_wt = engines["2Gb"]
now = dt.datetime.now()

df_whole = pd.DataFrame()
# Backfill window: January 2017 only.
for year in range(2017, 2018):
    for month in range(1, 2):
        conn = engines["2Gb"].connect()
        month_range = cld.monthrange(year, month)[1]
        # Normalize (year, month, month_range) through sf.Time on month-end.
        time_to_fill = sf.Time(dt.datetime(year, month, month_range))
        year, month = time_to_fill.year, time_to_fill.month
        month_range = time_to_fill.month_range
        sql_bm = sf.SQL.market_index(time_to_fill.today)  # Get benchmark prices
        sql_pe = sf.SQL.pe_index(time_to_fill.today, freq="w")
import calendar as cld
import datetime as dt
import time
import multiprocessing

import numpy as np
import pandas as pd
from dateutil.relativedelta import relativedelta
from functools import partial

from utils.algorithm import timeutils as tu
from utils.database import sqlfactory as sf, config as cfg, io
from utils.script import scriptutils as su

engines = cfg.load_engine()
engine_rd = engines["2Gb"]
engine_wt = engines["2Gb"]
_process_date = dt.date.today()
table = sf.Table("w", "index")


def calculate(idx, export_path=None):
    dfs = pd.DataFrame()
    PEIndex = sf.PEIndex(idx)
    first_date = PEIndex.firstmonday
    result_r = {}
    components_num = {}
    components = {}
    for year in range(first_date.timetuple().tm_year, _process_date.year + 1):
        # Current year stops at the processing month.
        if year == _process_date.year:
            month = _process_date.month
import calendar as cld
import datetime as dt

import numpy as np
import pandas as pd
from dateutil.relativedelta import relativedelta

from utils.database import sqlfactory as sf, config as cfg, io
import utils.script.scriptutils as su
from utils.algorithm import fund_indicator as fi, timeutils as tu
from utils.script.scriptutils import tic

table = sf.Table("w", "sub")
columns = ["fund_id", "statistic_date", "benchmark"]
columns.extend(table.columns())

engines = cfg.load_engine()
engine_wt = engines["2Gb"]
now = dt.datetime.now()

df_whole = pd.DataFrame()
# Backfill window: January 2017 only.
for year in range(2017, 2018):
    for month in range(1, 2):
        conn = engines["2Gb"].connect()
        month_range = cld.monthrange(year, month)[1]
        # Normalize (year, month, month_range) through sf.Time on month-end.
        time_to_fill = sf.Time(dt.datetime(year, month, month_range))
        year, month = time_to_fill.year, time_to_fill.month
        month_range = time_to_fill.month_range
        sql_bm = sf.SQL.market_index(time_to_fill.today)  # Get benchmark prices
        sql_pe = sf.SQL.pe_index(time_to_fill.today, freq="w")
import calendar as cld
import datetime as dt

from dateutil.relativedelta import relativedelta
import numpy as np
import pandas as pd

# NOTE: this script aliases sqlfactory as `du` (other siblings use `sf`).
from utils.database import sqlfactory as du, config as cfg, io
from utils.script import scriptutils as su
from utils.algorithm import fund_indicator as fi, timeutils as tu
from utils.script.scriptutils import tic

table = du.Table("m", "return")  # w -> m
columns = ["fund_id", "statistic_date", "benchmark"]
columns.extend(table.columns())

engines = cfg.load_engine()
engine_rd = engines["2Gb"]
engine_wt = engines["2Gb"]

df_whole = pd.DataFrame()
# Backfill window: March 2017 only.
for year in range(2017, 2018):
    for month in range(3, 4):
        conn = engine_rd.connect()
        now = dt.datetime.now()
        month_range = cld.monthrange(year, month)[1]
        # Normalize (year, month, month_range) through du.Time on month-end.
        time_to_fill = du.Time(dt.datetime(year, month, month_range))
        year, month = time_to_fill.year, time_to_fill.month
        month_range = time_to_fill.month_range
        sql_bm = du.SQL.market_index(time_to_fill.today)  # Get benchmark prices
import calendar as cld
import datetime as dt

import numpy as np
import pandas as pd
from dateutil.relativedelta import relativedelta

from utils.database import sqlfactory as sf, config as cfg, io
from utils.script import scriptutils as su
from utils.algorithm import fund_indicator as fi, timeutils as tu

table = sf.Table("w", "bm")
columns = ["index_id", "index_name", "statistic_date"]
columns.extend(table.columns())

# Statistics are computed for yesterday.
yesterday = dt.date.today() - dt.timedelta(1)
engines = cfg.load_engine()
engine_rd = engines["2Gb"]


def calculate():
    conn = engine_rd.connect()
    year, month = yesterday.year, yesterday.month
    month_range = cld.monthrange(year, month)[1]
    # Normalize (year, month) through sf.Time on month-end.
    time_to_fill = sf.Time(dt.datetime(year, month, month_range))
    year, month = time_to_fill.year, time_to_fill.month
    bms_used = [
        "hs300", "csi500", "sse50", "ssia", "cbi", "y1_treasury_rate", "nfi",
    ]
    # Get benchmark prices
    sql_bm = sf.SQL.market_index(
        date=time_to_fill.today, benchmarks=bms_used, whole=True)
    bm = pd.read_sql(sql_bm, conn)
import calendar as cld
import datetime as dt
import time

from dateutil.relativedelta import relativedelta
import numpy as np
import pandas as pd

from utils.algorithm import fund_indicator as fi, timeutils as tu
from utils.database import sqlfactory as sf, config as cfg, io
import utils.script.scriptutils as su

# Target: monthly organization-risk table.
table = sf.Table("m", "orisk")
columns = [
    "org_id", "org_name", "benchmark", "index_method", "index_range",
    "funds_number", "type_code", "type_name", "stype_code", "stype_name",
    "statistic_date",
]
# Keep all but the last 16 table columns, then re-append the final 8
# (i.e. columns -16..-9 are intentionally excluded).
columns.extend(table.columns()[:-16])
columns.extend(table.columns()[-8:])

engines = cfg.load_engine()
engine_rd = engines["2Gb"]
engine_wt = engines["2Gb"]
# Statistics are computed for yesterday.
process_date = dt.date.today() - dt.timedelta(1)


def calculate():
    conn = engine_rd.connect()
    df_whole = pd.DataFrame()
    year = process_date.year
    month = process_date.month
import calendar as cld
import datetime as dt
import time

from dateutil.relativedelta import relativedelta
import numpy as np
import pandas as pd

from utils.algorithm import fund_indicator as fi, timeutils as tu
from utils.database import sqlfactory as sf, config as cfg, io
import utils.script.scriptutils as su

# Target: monthly organization-return table.
table = sf.Table("m", "oreturn")
columns = [
    "org_id", "org_name", "benchmark", "index_method", "index_range",
    "funds_number", "type_code", "type_name", "stype_code", "stype_name",
    "statistic_date",
]
columns.extend(table.columns())

engines = cfg.load_engine()
engine_read = engines["2Gb"]
engine_wt = engines["2Gb"]
# Statistics are computed for yesterday.
process_date = dt.date.today() - dt.timedelta(1)


def calculate():
    su.tic("fetching get_data...")
    df_whole = pd.DataFrame()
    conn = engine_read.connect()
    year = process_date.year
    month = process_date.month
import calendar as cld
import datetime as dt

from dateutil.relativedelta import relativedelta
import numpy as np
import pandas as pd

from utils.database import sqlfactory as sf, config as cfg, io
from utils.script import scriptutils as su
from utils.algorithm import fund_indicator as fi, timeutils as tu
from utils.script.scriptutils import tic

table = sf.Table("m", "risk")  # w -> m
columns = ["fund_id", "statistic_date", "benchmark"]
columns.extend(table.columns())

engines = cfg.load_engine()
engine_rd = engines["2Gb"]
engine_wt = engines["2Gb"]

df_whole = pd.DataFrame()
# Backfill window: February 2017 only.
for year in range(2017, 2018):
    for month in range(2, 3):
        conn = engine_rd.connect()
        now = dt.datetime.now()
        month_range = cld.monthrange(year, month)[1]
        # Normalize (year, month, month_range) through sf.Time on month-end.
        time_to_fill = sf.Time(dt.datetime(year, month, month_range))
        year, month = time_to_fill.year, time_to_fill.month
        month_range = time_to_fill.month_range
import calendar as cld
import datetime as dt

import numpy as np
import pandas as pd
from dateutil.relativedelta import relativedelta

from utils.database import sqlfactory as sf, config as cfg, io
import utils.script.scriptutils as su
from utils.algorithm import fund_indicator as fi, timeutils as tu
from utils.script.scriptutils import tic

table = sf.Table("w", "return")
columns = ["fund_id", "statistic_date", "benchmark"]
columns.extend(table.columns())

engines = cfg.load_engine()
engine_wt = engines["2Gb"]
now = dt.datetime.now()

df_whole = pd.DataFrame()
# Backfill window: January 2017 only.
for year in range(2017, 2018):
    for month in range(1, 2):
        conn = engines["2Gb"].connect()
        month_range = cld.monthrange(year, month)[1]
        # Normalize (year, month, month_range) through sf.Time on month-end.
        time_to_fill = sf.Time(dt.datetime(year, month, month_range))
        year, month = time_to_fill.year, time_to_fill.month
        month_range = time_to_fill.month_range
        sql_bm = sf.SQL.market_index(time_to_fill.today)  # Get benchmark prices
        sql_pe = sf.SQL.pe_index(time_to_fill.today, freq="w")
import calendar as cld
import datetime as dt

import numpy as np
import pandas as pd
from dateutil.relativedelta import relativedelta

from utils.database import sqlfactory as sf, config as cfg, io
from utils.script import scriptutils as su
from utils.algorithm import fund_indicator as fi, timeutils as tu

table = sf.Table("m", "bm")
columns = ["index_id", "index_name", "statistic_date"]
columns.extend(table.columns())

# Statistics are computed for yesterday.
yesterday = dt.date.today() - dt.timedelta(1)
engines = cfg.load_engine()
engine_rd = engines["2Gb"]


def calculate():
    conn = engine_rd.connect()
    year, month = yesterday.year, yesterday.month
    month_range = cld.monthrange(year, month)[1]
    # Normalize (year, month) through sf.Time on month-end.
    time_to_fill = sf.Time(dt.datetime(year, month, month_range))
    year, month = time_to_fill.year, time_to_fill.month
    bms_used = [
        "hs300", "csi500", "sse50", "ssia", "cbi", "y1_treasury_rate", "nfi",
    ]
    # Get benchmark prices
    sql_bm = sf.SQL.market_index(
        date=time_to_fill.today, benchmarks=bms_used, whole=True)
    bm = pd.read_sql(sql_bm, conn)