Example #1
def research_db(sectors_list, sectors_filter_out, countries_list,
                countries_filter_out, evr_range, pe_range, pm_range,
                ev_millions_range, research_mode_max_ev, csv_db_path,
                db_filename, read_united_states_input_symbols, scan_mode,
                generate_result_folders, appearance_counter_min,
                appearance_counter_max, favor_sectors, favor_sectors_by,
                newer_path, older_path, db_exists_in_both_folders,
                diff_only_result, movement_threshold, res_length):
    if scan_mode == SCAN_MODE_TASE:
        tase_mode = 1
    else:
        tase_mode = 0

    if research_mode_max_ev:
        ev_millions_range = list(
            reversed(ev_millions_range)
        )  # Flip order to have stocks with higher EV first (as limit shall be Max and not Min)

    appearance_counter_dict_sss = {}
    prepare_appearance_counters_dictionaries(csv_db_path, db_filename,
                                             appearance_counter_dict_sss)
    ev_millions_range_len = len(ev_millions_range)
    pe_range_len = len(pe_range)
    evr_range_len = len(evr_range)
    pm_range_len = len(pm_range)
    research_rows_sss = np.zeros(
        (ev_millions_range_len, pe_range_len, evr_range_len, pm_range_len),
        dtype=int)
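    # research_rows_sss[ev][pe][evr][pm] will hold how many stocks pass each limit combination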
    for ev_millions_index, ev_millions_limit in enumerate(ev_millions_range):
        for pe_index, price_to_earnings_limit in enumerate(pe_range):
            for evr_index, enterprise_value_to_revenue_limit in enumerate(
                    evr_range):
                for pm_index, profit_margin_limit in enumerate(
                        pm_range
                ):  # TODO: ASAFR: 1. Narrow down the ambiguous parameters below. 2. Replace the magic numbers on ev_to_cfo_ratio etc. (100.0, 1000.0) with named constants/defines
                    num_results_for_ev_pe_evr_and_pm = sss.sss_run(
                        reference_run=[],
                        sectors_list=sectors_list,
                        sectors_filter_out=sectors_filter_out,
                        countries_list=countries_list,
                        countries_filter_out=countries_filter_out,
                        build_csv_db_only=0,
                        build_csv_db=0,
                        csv_db_path=csv_db_path,
                        db_filename=db_filename,
                        read_united_states_input_symbols=
                        read_united_states_input_symbols,
                        tase_mode=tase_mode,
                        num_threads=1,
                        market_cap_included=1,
                        research_mode=1,
                        profit_margin_limit=float(profit_margin_limit) / 100.0,
                        enterprise_value_millions_usd_limit=ev_millions_limit,
                        research_mode_max_ev=research_mode_max_ev,
                        ev_to_cfo_ratio_limit=10e9,
                        debt_to_equity_limit=10e9,
                        price_to_earnings_limit=price_to_earnings_limit,
                        enterprise_value_to_revenue_limit=
                        enterprise_value_to_revenue_limit,
                        favor_sectors=favor_sectors,
                        favor_sectors_by=favor_sectors_by,
                        generate_result_folders=generate_result_folders,
                        appearance_counter_dict_sss=appearance_counter_dict_sss,
                        appearance_counter_min=appearance_counter_min,
                        appearance_counter_max=appearance_counter_max)
                    if num_results_for_ev_pe_evr_and_pm < appearance_counter_min:
                        break  # already lower than appearance_counter_min results. With higher profit margin limit there will always be less results -> save running time by breaking
                    research_rows_sss[ev_millions_index][pe_index][evr_index][
                        pm_index] = int(num_results_for_ev_pe_evr_and_pm)
                    print(
                        'ev_millions_limit {:6} | price_to_earnings_limit {:8} | row {:3} -> (enterprise_value_to_revenue_limit {:8}) | col {:3} -> (profit_margin_limit {:7}%): num_results_for_ev_pe_evr_and_pm = {}'
                        .format(ev_millions_limit, price_to_earnings_limit,
                                evr_index, enterprise_value_to_revenue_limit,
                                pm_index, profit_margin_limit,
                                num_results_for_ev_pe_evr_and_pm))
    results_filename = 'results_without_labels.csv'

    mesh_combined = combine_multi_dim_to_table_4d(multi_dim=research_rows_sss,
                                                  dim4=ev_millions_range,
                                                  dim3=pe_range,
                                                  rows=evr_range,
                                                  cols=pm_range)

    np.savetxt(csv_db_path + '/' + results_filename,
               mesh_combined,
               fmt='%f',
               delimiter=',')
    title_row = list(pm_range)  # copy so the inserts below don't mutate pm_range; column 3 and onwards
    title_row.insert(0, 'evr / pm')  # column 2
    title_row.insert(0, 'pe')  # column 1
    title_row.insert(0, 'ev')  # column 0
    ev_pe_evr_rows_pm_cols_filenames_list = [
        csv_db_path + '/' + results_filename
    ]
    # Read Results, and add row and col axis:
    for filename in ev_pe_evr_rows_pm_cols_filenames_list:
        ev_pe_evr_rows_pm_cols = [title_row]
        with open(filename, mode='r', newline='') as engine:
            reader = csv.reader(engine, delimiter=',')
            row_index = 0  # title + len(ev_millions_range)*len(evr_range)
            for row in reader:
                ev_pe_evr_rows_pm_cols.append(row)
                row.insert(2, evr_range[
                    int(row_index) %
                    len(evr_range)])  # column 2 is evr repetitively cyclic
                row_index += 1
    for index in range(len(ev_pe_evr_rows_pm_cols_filenames_list)):
        row_col_csv_filename = ev_pe_evr_rows_pm_cols_filenames_list[
            index].replace('.csv', '_with_labels.csv')
        os.makedirs(os.path.dirname(row_col_csv_filename), exist_ok=True)
        with open(row_col_csv_filename, mode='w', newline='') as engine:
            writer = csv.writer(engine)
            writer.writerows(ev_pe_evr_rows_pm_cols)

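    # Sort symbols by how often they appeared across all passing limit combinations, then keep only those that appeared at least once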
    sorted_appearance_counter_dict_sss = {
        k: v
        for k, v in sorted(appearance_counter_dict_sss.items(),
                           key=lambda item: item[1],
                           reverse=True)
    }
    result_sorted_appearance_counter_dict_sss = {
        k: v
        for k, v in sorted_appearance_counter_dict_sss.items() if v > 0.0
    }

    result_list_filename_sss = csv_db_path + '/results_sss.csv'

    with open(result_list_filename_sss, 'w') as f:
        f.write("Symbol,Name,Sector,sss_value,close,appearance_counter\n")
        for key in result_sorted_appearance_counter_dict_sss.keys():
            # Columns: Symbol, Name (commas stripped), Sector, sss_value, Close, appearance_counter
            f.write(
                "%s,%s,%s,%s,%s,%s\n" %
                (key[0], str(key[1]).replace(',', ' '), key[2], key[3], key[4],
                 round(result_sorted_appearance_counter_dict_sss[key], 4)))

    if older_path is not None:
        diff_lists = sss_diff.run(
            newer_path=newer_path,
            older_path=older_path,
            db_exists_in_both_folders=db_exists_in_both_folders,
            diff_only_result=diff_only_result,
            movement_threshold=movement_threshold,
            res_length=res_length,
            consider_as_new_from=PDF_NUM_ENTRIES_IN_REPORT)

        # [0:15] of the cleaned path string is the date-and-time prefix
        pdf_generator.csv_to_pdf(
            csv_filename=result_list_filename_sss,
            csv_db_path=csv_db_path,
            data_time_str=result_list_filename_sss.replace(
                'Results', '').replace('Tase', '').replace('Nsr', '').replace(
                    'All', '').replace('Custom', '').replace('/', '')[0:15],
            title=TITLES[scan_mode].replace('_', ' '),
            limit_num_rows=PDF_NUM_ENTRIES_IN_REPORT,
            diff_list=diff_lists[0],
            tase_mode=tase_mode)
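
For orientation, a minimal call sketch for the research_db() of Example #1 follows. Every concrete value in it (ranges, paths, counters) is an illustrative assumption rather than a setting taken from the project, and older_path=None keeps the optional sss_diff/PDF reporting stage from running; only SCAN_MODE_TASE is visible in this excerpt, so the sketch is set up as a TASE sweep.

# Sketch only -- all values below are assumptions, not project defaults.
research_db(sectors_list=[],
            sectors_filter_out=0,
            countries_list=[],
            countries_filter_out=0,
            evr_range=[5, 10, 20],               # EV/revenue limits to sweep
            pe_range=[10, 20, 40],               # P/E limits to sweep
            pm_range=[5, 10, 15],                # profit-margin limits, in percent
            ev_millions_range=[50, 100, 500],    # EV limits in millions USD
            research_mode_max_ev=False,          # False: EV limits act as minimums
            csv_db_path='Results/20210101-120000_Tase',  # hypothetical existing DB folder
            db_filename='db.csv',
            read_united_states_input_symbols=0,
            scan_mode=SCAN_MODE_TASE,
            generate_result_folders=0,
            appearance_counter_min=25,
            appearance_counter_max=35,
            favor_sectors=['Technology'],
            favor_sectors_by=[3.0],
            newer_path='Results/20210101-120000_Tase',
            older_path=None,                     # None -> skip the diff + PDF report stage
            db_exists_in_both_folders=1,
            diff_only_result=False,
            movement_threshold=0,
            res_length=400)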
Example #2
if not research_mode:  # Run Build DB Only:
    if run_custom_tase:
        sss.sss_run(reference_run=reference_run_tase,
                    sectors_list=[],
                    sectors_filter_out=0,
                    countries_list=[],
                    countries_filter_out=0,
                    build_csv_db_only=1,
                    build_csv_db=1,
                    csv_db_path='None',
                    db_filename='None',
                    read_united_states_input_symbols=0,
                    tase_mode=1,
                    num_threads=1,
                    market_cap_included=1,
                    research_mode=0,
                    profit_margin_limit=0.0001,
                    ev_to_cfo_ratio_limit=10e9,
                    debt_to_equity_limit=10e9,
                    enterprise_value_millions_usd_limit=5,
                    research_mode_max_ev=False,
                    price_to_earnings_limit=10e9,
                    enterprise_value_to_revenue_limit=10e9,
                    favor_sectors=[],
                    favor_sectors_by=[],
                    generate_result_folders=1,
                    custom_portfolio=sss_config.custom_portfolio_tase)
    if run_custom:
        sss.sss_run(reference_run=reference_run_all,
                    sectors_list=[],
Example #3
File: sss_run.py  Project: coolsnake/sss
# ==============================================
# sss.sss_run(sectors_list=[], sectors_filter_out=0, countries_list=['United States'], countries_filter_out=0, build_csv_db_only=1, build_csv_db=1, csv_db_path='None', read_united_states_input_symbols=0, tase_mode=0, num_threads=20, market_cap_included=1, use_investpy=0, research_mode=0, profit_margin_limit=0.01, ev_to_cfo_ratio_limit=200.0, min_enterprise_value_millions_usd=100, best_n_select=2, enterprise_value_to_revenue_limit=200, favor_sectors=['Technology', 'Financial Services'], favor_sectors_by=[4.5, 0.33333], generate_result_folders=1)

# Run Build DB Only: All/Others
# =============================
sss.sss_run(sectors_list=[],
            sectors_filter_out=0,
            countries_list=['United States'],
            countries_filter_out=0,
            build_csv_db_only=1,
            build_csv_db=1,
            csv_db_path='None',
            read_united_states_input_symbols=1,
            tase_mode=0,
            num_threads=20,
            market_cap_included=1,
            use_investpy=0,
            research_mode=0,
            profit_margin_limit=0.01,
            ev_to_cfo_ratio_limit=20000.0,
            min_enterprise_value_millions_usd=5,
            best_n_select=3,
            enterprise_value_to_revenue_limit=1000,
            favor_sectors=['Technology', 'Financial Services'],
            favor_sectors_by=[4.5, 1],
            generate_result_folders=1)

# Research Mode:
# ==============

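The body of the research-mode branch is cut off in this excerpt. In the same project it is served by the research_db() helper shown in Example #4 below, so an illustrative call might look like the sketch here; all values are assumptions, not the project's actual research settings, and older_path=None skips the diff/PDF stage.

# Sketch only -- assumed values; research_db() is defined in Example #4 below.
research_db(sectors_list=[],
            sectors_filter_out=0,
            countries_list=[],
            countries_filter_out=0,
            evr_range=[5, 10, 20],               # EV/revenue limits to sweep
            pm_range=[5, 10, 15],                # profit-margin limits, in percent
            ev_millions_range=[50, 100, 500],    # minimum EV in millions USD
            csv_db_path='Results/20210101-120000_Tase',  # hypothetical existing DB folder
            read_united_states_input_symbols=0,
            scan_mode=SCAN_MODE_TASE,            # the only scan-mode constant visible here
            generate_result_folders=0,
            appearance_counter_min=25,
            appearance_counter_max=35,
            favor_sectors=['Technology', 'Financial Services'],
            favor_sectors_by=[4.5, 1],
            newer_path='Results/20210101-120000_Tase',
            older_path=None,                     # None -> skip the diff + PDF report stage
            db_exists_in_both_folders=1,
            diff_only_recommendation=False,
            movement_threshold=0,
            newer_rec_ranges=[0, 35],            # assumed
            older_rec_ranges=[0, 35],            # assumed
            rec_length=400)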
Example #4
File: sss_run.py  Project: coolsnake/sss
def research_db(sectors_list, sectors_filter_out, countries_list,
                countries_filter_out, evr_range, pm_range, ev_millions_range,
                csv_db_path, read_united_states_input_symbols, scan_mode,
                generate_result_folders, appearance_counter_min,
                appearance_counter_max, favor_sectors, favor_sectors_by,
                newer_path, older_path, db_exists_in_both_folders,
                diff_only_recommendation, movement_threshold, newer_rec_ranges,
                older_rec_ranges, rec_length):
    if scan_mode == SCAN_MODE_TASE:
        tase_mode = 1
    else:
        tase_mode = 0

    appearance_counter_dict_sss = {}
    appearance_counter_dict_ssss = {}
    appearance_counter_dict_sssss = {}
    prepare_appearance_counters_dictionaries(csv_db_path,
                                             appearance_counter_dict_sss,
                                             appearance_counter_dict_ssss,
                                             appearance_counter_dict_sssss)
    ev_millions_range_len = len(ev_millions_range)
    evr_range_len = len(evr_range)
    pm_range_len = len(pm_range)
    research_rows_sss = np.zeros(
        (ev_millions_range_len, evr_range_len, pm_range_len), dtype=int)
    research_rows_ssss = np.zeros(
        (ev_millions_range_len, evr_range_len, pm_range_len), dtype=int)
    research_rows_sssss = np.zeros(
        (ev_millions_range_len, evr_range_len, pm_range_len), dtype=int)
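    # research_rows_*[ev][evr][pm] will hold how many results each (EV, EVR, PM) limit combination yields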
    for ev_millions_index, ev_millions_limit in enumerate(ev_millions_range):
        for evr_index, enterprise_value_to_revenue_limit in enumerate(
                evr_range):
            for pm_index, profit_margin_limit in enumerate(pm_range):
                num_results_for_ev_evr_and_pm = sss.sss_run(
                    sectors_list=sectors_list,
                    sectors_filter_out=sectors_filter_out,
                    countries_list=countries_list,
                    countries_filter_out=countries_filter_out,
                    build_csv_db_only=0,
                    build_csv_db=0,
                    csv_db_path=csv_db_path,
                    read_united_states_input_symbols=
                    read_united_states_input_symbols,
                    tase_mode=tase_mode,
                    num_threads=1,
                    market_cap_included=1,
                    use_investpy=0,
                    research_mode=1,
                    profit_margin_limit=float(profit_margin_limit) / 100.0,
                    min_enterprise_value_millions_usd=ev_millions_limit,
                    ev_to_cfo_ratio_limit=100.0,
                    best_n_select=3,
                    enterprise_value_to_revenue_limit=
                    enterprise_value_to_revenue_limit,
                    favor_sectors=favor_sectors,
                    favor_sectors_by=favor_sectors_by,
                    generate_result_folders=generate_result_folders,
                    appearance_counter_dict_sss=appearance_counter_dict_sss,
                    appearance_counter_dict_ssss=appearance_counter_dict_ssss,
                    appearance_counter_dict_sssss=appearance_counter_dict_sssss,
                    appearance_counter_min=appearance_counter_min,
                    appearance_counter_max=appearance_counter_max)
                if num_results_for_ev_evr_and_pm < appearance_counter_min:
                    break  # already lower than appearance_counter_min results. With higher profit margin limit there will always be less results -> save running time by breaking
                research_rows_sss[ev_millions_index][evr_index][
                    pm_index] = int(num_results_for_ev_evr_and_pm)
                research_rows_ssss[ev_millions_index][evr_index][
                    pm_index] = int(num_results_for_ev_evr_and_pm)
                research_rows_sssss[ev_millions_index][evr_index][
                    pm_index] = int(num_results_for_ev_evr_and_pm)
                print(
                    'ev_millions_limit {:6} | row {:3} -> (enterprise_value_to_revenue_limit {:8}) | col {:3} -> (profit_margin_limit {:7}%): num_results_for_ev_evr_and_pm = {}'
                    .format(ev_millions_limit, evr_index,
                            enterprise_value_to_revenue_limit, pm_index,
                            profit_margin_limit,
                            num_results_for_ev_evr_and_pm))
    results_filename = 'results_evm{}-{}_evr{}-{}_pm{}-{}.csv'.format(
        ev_millions_range[0], ev_millions_range[-1], evr_range[0],
        evr_range[-1], pm_range[0], pm_range[-1])

    mesh_combined = combine_multi_dim_to_table(multi_dim=research_rows_sss,
                                               dim3=ev_millions_range,
                                               rows=evr_range,
                                               cols=pm_range)

    np.savetxt(csv_db_path + '/' + results_filename,
               mesh_combined.astype(int),
               fmt='%d',
               delimiter=',')
    title_row = list(pm_range)  # copy so the inserts below don't mutate pm_range; column 2 and onwards
    title_row.insert(0, 'evr / pm')  # column 1
    title_row.insert(0, 'ev')  # column 0
    ev_evr_rows_pm_cols_filenames_list = [csv_db_path + '/' + results_filename]
    # Read Results, and add row and col axis:
    for filename in ev_evr_rows_pm_cols_filenames_list:
        ev_evr_rows_pm_cols = [title_row]
        with open(filename, mode='r', newline='') as engine:
            reader = csv.reader(engine, delimiter=',')
            row_index = 0  # title + len(ev_millions_range)*len(evr_range)
            for row in reader:
                row.insert(0, evr_range[
                    row_index %
                    len(evr_range)])  # column 1 is evr, repeating cyclically
                row.insert(
                    0, ev_millions_range[row_index // len(evr_range)]
                )  # column 0 is the ev, increasing every len(evr_range) rows
                ev_evr_rows_pm_cols.append(row)
                row_index += 1
    for index in range(len(ev_evr_rows_pm_cols_filenames_list)):
        row_col_csv_filename = ev_evr_rows_pm_cols_filenames_list[
            index].replace('.csv', '_evr_row_pm_col.csv')
        os.makedirs(os.path.dirname(row_col_csv_filename), exist_ok=True)
        with open(row_col_csv_filename, mode='w', newline='') as engine:
            writer = csv.writer(engine)
            writer.writerows(ev_evr_rows_pm_cols)

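    # Sort each strategy's appearance counters (sss / ssss / sssss) in descending order and keep only symbols that appeared at least once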
    sorted_appearance_counter_dict_sss = {
        k: v
        for k, v in sorted(appearance_counter_dict_sss.items(),
                           key=lambda item: item[1],
                           reverse=True)
    }
    result_sorted_appearance_counter_dict_sss = {
        k: v
        for k, v in sorted_appearance_counter_dict_sss.items() if v > 0.0
    }

    sorted_appearance_counter_dict_ssss = {
        k: v
        for k, v in sorted(appearance_counter_dict_ssss.items(),
                           key=lambda item: item[1],
                           reverse=True)
    }
    result_sorted_appearance_counter_dict_ssss = {
        k: v
        for k, v in sorted_appearance_counter_dict_ssss.items() if v > 0.0
    }

    sorted_appearance_counter_dict_sssss = {
        k: v
        for k, v in sorted(appearance_counter_dict_sssss.items(),
                           key=lambda item: item[1],
                           reverse=True)
    }
    result_sorted_appearance_counter_dict_sssss = {
        k: v
        for k, v in sorted_appearance_counter_dict_sssss.items() if v > 0.0
    }

    recommendation_list_filename_sss = csv_db_path + '/rec_sss_' + results_filename.replace(
        'results_', '')
    recommendation_list_filename_ssss = csv_db_path + '/rec_ssss_' + results_filename.replace(
        'results_', '')
    recommendation_list_filename_sssss = csv_db_path + '/rec_sssss_' + results_filename.replace(
        'results_', '')

    with open(recommendation_list_filename_sss, 'w') as f:
        f.write("Ticker,Name,Sector,sss_value,close,appearance_counter\n")
        for key in result_sorted_appearance_counter_dict_sss.keys():
            # Columns: Ticker, Name (commas stripped), Sector, sss_value, Close, appearance_counter
            f.write(
                "%s,%s,%s,%s,%s,%s\n" %
                (key[0], str(key[1]).replace(',', ' '), key[2], key[3], key[4],
                 round(result_sorted_appearance_counter_dict_sss[key], 4)))

    with open(recommendation_list_filename_ssss, 'w') as f:
        f.write("Ticker,Name,Sector,ssss_value,close,appearance_counter\n")
        for key in result_sorted_appearance_counter_dict_ssss.keys():
            f.write(
                "%s,%s,%s,%s,%s,%s\n" %
                (key[0], str(key[1]).replace(',', ' '), key[2], key[3], key[4],
                 round(result_sorted_appearance_counter_dict_ssss[key], 4)))

    with open(recommendation_list_filename_sssss, 'w') as f:
        f.write("Ticker,Name,Sector,sssss_value,close,appearance_counter\n")
        for key in result_sorted_appearance_counter_dict_sssss.keys():
            f.write(
                "%s,%s,%s,%s,%s,%s\n" %
                (key[0], str(key[1]).replace(',', ' '), key[2], key[3], key[4],
                 round(result_sorted_appearance_counter_dict_sssss[key], 4)))

    if older_path is not None:
        diff_lists = sss_diff.run(
            newer_path=newer_path,
            older_path=older_path,
            db_exists_in_both_folders=db_exists_in_both_folders,
            diff_only_recommendation=diff_only_recommendation,
            movement_threshold=movement_threshold,
            newer_rec_ranges=newer_rec_ranges,
            older_rec_ranges=older_rec_ranges,
            rec_length=rec_length,
            consider_as_new_from=PDF_NUM_ENTRIES_IN_REPORT)

        # [0:15] of the path string is the date-and-time prefix
        pdf_generator.csv_to_pdf(
            csv_filename=recommendation_list_filename_sss,
            csv_db_path=csv_db_path,
            data_time_str=recommendation_list_filename_sss.replace(
                'Results/', '')[0:15],
            title=TITLES[scan_mode].replace('_', ' '),
            limit_num_rows=PDF_NUM_ENTRIES_IN_REPORT,
            diff_list=diff_lists[0],
            tase_mode=tase_mode)
        pdf_generator.csv_to_pdf(
            csv_filename=recommendation_list_filename_ssss,
            csv_db_path=None,
            data_time_str=recommendation_list_filename_ssss.replace(
                'Results/', '')[0:15],
            title=TITLES[scan_mode].replace('_', ' ') + 'ssss',
            limit_num_rows=PDF_NUM_ENTRIES_IN_REPORT,
            diff_list=diff_lists[0],
            tase_mode=tase_mode)
        pdf_generator.csv_to_pdf(
            csv_filename=recommendation_list_filename_sssss,
            csv_db_path=None,
            data_time_str=recommendation_list_filename_sssss.replace(
                'Results/', '')[0:15],
            title=TITLES[scan_mode].replace('_', ' ') + 'sssss',
            limit_num_rows=PDF_NUM_ENTRIES_IN_REPORT,
            diff_list=diff_lists[0],
            tase_mode=tase_mode)
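
combine_multi_dim_to_table() itself is not part of this excerpt. Judging from how its output is consumed above (saved as integers, one row per (EV, EVR) pair and one column per PM value, with the EV and EVR label columns inserted only afterwards), it plausibly amounts to a reshape; the helper below is a hypothetical stand-in written under that assumption, not the project's implementation.

import numpy as np


def combine_multi_dim_to_table(multi_dim, dim3, rows, cols):
    """Hypothetical stand-in: flatten a (len(dim3), len(rows), len(cols)) count array
    into a 2-D table with len(dim3) * len(rows) rows and len(cols) columns, which is
    the shape the labelling loop in research_db() expects to read back from the CSV."""
    assert multi_dim.shape == (len(dim3), len(rows), len(cols))
    return multi_dim.reshape(len(dim3) * len(rows), len(cols))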
Example #5
File: sss_run.py  Project: mkaladi/sss
research_mode = True  # Research Mode

if not research_mode:  # Run Build DB Only:
    if run_custom_tase:
        sss.sss_run(sectors_list=[],
                    sectors_filter_out=0,
                    countries_list=[],
                    countries_filter_out=0,
                    build_csv_db_only=1,
                    build_csv_db=1,
                    csv_db_path='None',
                    read_united_states_input_symbols=0,
                    tase_mode=1,
                    num_threads=1,
                    market_cap_included=1,
                    use_investpy=0,
                    research_mode=0,
                    profit_margin_limit=0.0001,
                    ev_to_cfo_ratio_limit=20000.0,
                    debt_to_equity_limit=1000.0,
                    min_enterprise_value_millions_usd=5,
                    price_to_earnings_limit=9000,
                    enterprise_value_to_revenue_limit=1500,
                    favor_sectors=['Technology', 'Financial Services'],
                    favor_sectors_by=[4.0, 0.75],
                    generate_result_folders=1,
                    custom_portfolio=['IGLD-M.TA'])
    if run_custom:
        sss.sss_run(
            sectors_list=[],
            sectors_filter_out=0,