コード例 #1
0
def obv_feature_detection(obv: list, position: pd.DataFrame, **kwargs) -> list:
    """On Balance Volume Feature Detection

    Arguments:
        obv {list} -- on balance volume signal
        position {pd.DataFrame} -- fund dataset

    Optional Args:
        sma_interval {int} -- lookback of simple moving average (default: {10})
        plot_output {bool} -- (default: {True})
        filter_factor {float} -- threshold divisor (x/filter_factor) for "significant" OBVs
                                (default: {2.5})
        progress_bar {ProgressBar} -- (default: {None})

    Returns:
        tuple -- (ofilter signal list, list of feature dictionaries)
    """
    # BUG FIX: the kwarg was read under the misspelled key 'sma_internal', so a
    # caller-supplied 'sma_interval' was silently ignored. Accept the documented
    # spelling first; keep the old key as a fallback for backward compatibility.
    sma_interval = kwargs.get('sma_interval', kwargs.get('sma_internal', 10))
    plot_output = kwargs.get('plot_output', True)
    filter_factor = kwargs.get('filter_factor', 2.5)
    progress_bar = kwargs.get('progress_bar')

    # Medium and long lookbacks are fixed multiples (2x, 4x) of the short one.
    sma_interval2 = sma_interval * 2
    sma_interval3 = sma_interval2 * 2

    obv_sig = simple_moving_avg(obv, sma_interval, data_type='list')
    obv_sig2 = simple_moving_avg(obv, sma_interval2, data_type='list')
    obv_sig3 = simple_moving_avg(obv, sma_interval3, data_type='list')
    # Deviation of raw OBV from its medium-term average; drives the filters below.
    obv_diff = [ob - obv_sig2[i] for i, ob in enumerate(obv)]

    sma_features = find_obv_sma_trends(
        obv, [obv_sig, obv_sig2, obv_sig3],
        [sma_interval, sma_interval2, sma_interval3], position)

    if plot_output:
        generic_plotting([obv, obv_sig, obv_sig2, obv_sig3],
                         title='OBV Signal Line',
                         legend=[
                             'obv', f'sma-{sma_interval}',
                             f"sma-{sma_interval2}", f"sma-{sma_interval3}"
                         ])

    if progress_bar is not None:
        progress_bar.uptick(increment=0.25)

    # A looser (half) filter factor is used to detect volume-spike features.
    filter_factor2 = filter_factor / 2.0
    ofilter = generate_obv_ofilter(obv_diff, filter_factor2)
    sig_features = find_obv_sig_vol_spikes(ofilter, position)

    # Spike features first, then SMA-trend features (original ordering kept).
    features = list(sig_features) + list(sma_features)

    # The returned filter is regenerated with the full (stricter) factor.
    ofilter = generate_obv_ofilter(obv_diff, filter_factor)

    return ofilter, features
コード例 #2
0
def generate_fund_from_ledger(ledger_name: str):
    """Generate Fund from Ledger

    Build custom fund(s) from one ledger csv file or every ledger csv found.

    Arguments:
        ledger_name {str} -- either 'all' or a filename
    """
    if ledger_name in ('all', 'ALL'):
        # Every csv in the ledger directory is processed.
        ledger_dir = os.path.join("resources", "ledgers")
        if not os.path.exists(ledger_dir):
            print(f"No ledger directory '{ledger_dir}' found.")
            return
        ledger_paths = glob.glob(os.path.join(ledger_dir, "*.csv"))

    else:
        # A single named ledger file.
        single_path = os.path.join("resources", "ledgers", ledger_name)
        if not os.path.exists(single_path):
            print(f"No ledger named '{single_path}' found.")
            return
        ledger_paths = [single_path]

    # Build each fund, keyed by its extracted symbol.
    ledgers = {}
    for index, csv_path in enumerate(ledger_paths):
        raw_ledger = pd.read_csv(csv_path)
        fund_content = create_fund(extract_from_format(raw_ledger, index=index))
        ledgers[fund_content['symbol']] = fund_content

    plots = create_plot_content(ledgers)

    generic_plotting(plots['prices'],
                     title="Custom Funds",
                     ylabel='Price',
                     legend=plots['tickers'],
                     x=plots['x'])

    export_funds(ledgers)
コード例 #3
0
def get_bollinger_signals(position: pd.DataFrame, period: int, stdev: float,
                          **kwargs) -> dict:
    """Get Bollinger Band Signals

    Arguments:
        position {pd.DataFrame} -- dataset
        period {int} -- time frame for moving average
        stdev {float} -- multiplier for band range

    Optional Args:
        plot_output {bool} -- (default: {True})
        filter_type {str} -- type of moving average (default: {'simple'})
        name {str} -- (default: {''})
        view {str} -- directory of plots (default: {''})

    Returns:
        dict -- bollinger band data object with 'upper_band', 'lower_band',
                and 'middle_band' lists
    """
    filter_type = kwargs.get('filter_type', 'simple')
    plot_output = kwargs.get('plot_output', True)
    name = kwargs.get('name', '')
    # BUG FIX: 'view' previously defaulted to None, which made the
    # os.path.join() below raise a TypeError whenever plot_output was False.
    # Coerce None to '' (os.path.join skips empty components).
    view = kwargs.get('view') or ''

    # "Typical price" is the mean of close, low, and high for each bar.
    typical_price = [
        (close + position['Low'][i] + position['High'][i]) / 3.0
        for i, close in enumerate(position['Close'])
    ]

    if filter_type == 'exponential':
        ma = exponential_moving_avg(typical_price, period, data_type='list')
    else:
        ma = simple_moving_avg(typical_price, period, data_type='list')

    # Bands are +/- (stdev * rolling std-dev of typical price) about the MA.
    # Before index `period` there is no full window, so bands equal the MA.
    upper = ma.copy()
    lower = ma.copy()
    for i in range(period, len(ma)):
        std = np.std(typical_price[i - period:i])
        upper[i] = ma[i] + (stdev * std)
        lower[i] = ma[i] - (stdev * std)

    signals = {'upper_band': upper, 'lower_band': lower, 'middle_band': ma}

    name3 = INDEXES.get(name, name)
    name2 = name3 + ' - Bollinger Bands'
    if plot_output:
        generic_plotting(
            [position['Close'], ma, upper, lower],
            title=name2,
            x=position.index,
            legend=['Price', 'Moving Avg', 'Upper Band', 'Lower Band'])

    else:
        filename = os.path.join(name, view, f"bollinger_bands_{name}.png")
        generic_plotting(
            [position['Close'], ma, upper, lower],
            title=name2,
            x=position.index,
            legend=['Price', 'Moving Avg', 'Upper Band', 'Lower Band'],
            saveFig=True,
            filename=filename)

    return signals
コード例 #4
0
def relative_strength(primary_name: str, full_data_dict: dict,
                      **kwargs) -> list:
    """Relative Strength

    Compare a fund vs. market, sector, and/or other fund

    Arguments:
        primary_name {str} -- primary fund to compare against
        full_data_dict {dict} -- all retrieved funds by fund name

    Optional Args:
        secondary_fund_names {list} -- fund names to compare against (default: {[]})
        meta {dict} -- "metadata" from api calls (default: {None})
        config {dict} -- control config dictionary of software package (default: {None})
        sector {str} -- sector fund (if in full_data_dict) for comparison (default: {''})
        sector_data {dict} -- sector fund datasets keyed by ticker (default: {{}})
        plot_output {bool} -- True to render plot in realtime (default: {True})
        progress_bar {ProgressBar} -- (default: {None})
        view {str} -- Directory of plots (default: {''})
        period {str} -- lookback period passed to api fetches (default: {'2y'})
        interval {str} -- sampling interval passed to api fetches (default: {'1d'})

    Returns:
        tuple -- (relative strength info dict, sector benchmark dataset or None)
    """
    secondary_fund_names = kwargs.get('secondary_fund_names', [])
    config = kwargs.get('config', None)
    sector = kwargs.get('sector', '')
    plot_output = kwargs.get('plot_output', True)
    progress_bar = kwargs.get('progress_bar', None)
    meta = kwargs.get('meta', None)
    sector_data = kwargs.get('sector_data', {})
    view = kwargs.get('view', '')

    period = kwargs.get('period', '2y')
    interval = kwargs.get('interval', '1d')

    # sector_bench holds the matched sector fund's dataset for benchmarking;
    # stays None when no sector match is made below.
    sector_bench = None
    comp_funds = []
    comp_data = {}
    if meta is not None:
        # Sector name reported by the api metadata, if present.
        match = meta.get('info', {}).get('sector')

        if match is not None:
            # Describe the primary fund's length and date range so the sector
            # fund fetched via the api lines up with it.
            fund_len = {
                'length':
                len(full_data_dict[primary_name]['Close']),
                'start':
                full_data_dict[primary_name].index[0],
                'end':
                full_data_dict[primary_name].index[
                    len(full_data_dict[primary_name]['Close']) - 1],
                'dates':
                full_data_dict[primary_name].index
            }

            match_fund, match_data = api_sector_match(match,
                                                      config,
                                                      fund_len=fund_len,
                                                      period=period,
                                                      interval=interval)

            if match_fund is not None:
                # Companion funds of the matched sector become additional
                # secondary comparisons.
                comp_funds, comp_data = api_sector_funds(match_fund,
                                                         config,
                                                         fund_len=fund_len,
                                                         period=period,
                                                         interval=interval)

                if match_data is None:
                    match_data = full_data_dict
                sector = match_fund
                sector_data = match_data
                sector_bench = match_data[match_fund]

    if progress_bar is not None:
        progress_bar.uptick(increment=0.3)

    r_strength = dict()

    rat_sp = []           # normalized ratio vs. S&P 500
    rat_sector = []       # normalized ratio vs. sector fund
    rat_secondaries = []  # normalized ratios vs. each secondary fund
    secondary_names = []
    # NOTE(review): this mutates a caller-supplied list when one was passed
    # in via kwargs — confirm that is acceptable to callers.
    secondary_fund_names.extend(comp_funds)

    # Backfill sector_data with companion-fund datasets not already present.
    for key in comp_data:
        if sector_data.get(key) is None:
            sector_data[key] = comp_data[key]

    sp = get_SP500_df(full_data_dict)
    if sp is not None:
        rat_sp = normalized_ratio(full_data_dict[primary_name], sp)
    if progress_bar is not None:
        progress_bar.uptick(increment=0.1)

    # NOTE(review): assumes 'sector' is a key of sector_data whenever
    # sector_data is non-empty; a KeyError is possible otherwise — confirm.
    if len(sector_data) > 0:
        rat_sector = normalized_ratio(full_data_dict[primary_name],
                                      sector_data[sector])
    if progress_bar is not None:
        progress_bar.uptick(increment=0.1)

    # Secondary funds may live in either the full data dict or sector_data;
    # funds found in neither are silently skipped.
    if len(secondary_fund_names) > 0:
        for sfund in secondary_fund_names:
            if full_data_dict.get(sfund) is not None:
                rat_secondaries.append(
                    normalized_ratio(full_data_dict[primary_name],
                                     full_data_dict[sfund]))
                secondary_names.append(sfund)

            elif sector_data.get(sfund) is not None:
                rat_secondaries.append(
                    normalized_ratio(full_data_dict[primary_name],
                                     sector_data[sfund]))
                secondary_names.append(sfund)

    if progress_bar is not None:
        progress_bar.uptick(increment=0.2)

    # Period-based strength over fixed 20/50/100 lookbacks.
    st = period_strength(primary_name,
                         full_data_dict,
                         config=config,
                         periods=[20, 50, 100],
                         sector=sector,
                         sector_data=sector_data.get(sector, None))

    r_strength['market'] = {'tabular': rat_sp, 'comparison': 'S&P500'}
    r_strength['sector'] = {'tabular': rat_sector, 'comparison': sector}
    r_strength['period'] = st
    r_strength['secondary'] = [{
        'tabular': second,
        'comparison': secondary_names[i]
    } for i, second in enumerate(rat_secondaries)]

    # Dates are derived from the first fund in the dict; truncate to the
    # S&P ratio length when the ratio series is shorter.
    dates = dates_extractor_list(full_data_dict[list(
        full_data_dict.keys())[0]])
    if len(rat_sp) < len(dates):
        dates = dates[0:len(rat_sp)]

    # Assemble only the non-empty ratio series for plotting/tabular output.
    output_data = []
    legend = []
    if len(rat_sp) > 0:
        output_data.append(rat_sp)
        legend.append("vs. S&P 500")

    if len(rat_sector) > 0:
        output_data.append(rat_sector)
        legend.append(f"vs. Sector ({sector})")

    if len(rat_secondaries) > 0:
        for i, rat in enumerate(rat_secondaries):
            output_data.append(rat)
            legend.append(f"vs. {secondary_names[i]}")

    r_strength['tabular'] = {}
    for i, out_data in enumerate(output_data):
        r_strength['tabular'][str(legend[i])] = out_data

    primary_name2 = INDEXES.get(primary_name, primary_name)
    title = f"Relative Strength of {primary_name2}"

    if progress_bar is not None:
        progress_bar.uptick(increment=0.1)

    if plot_output:
        generic_plotting(output_data,
                         x=dates,
                         title=title,
                         legend=legend,
                         ylabel='Difference Ratio')

    else:
        filename = os.path.join(primary_name, view,
                                f"relative_strength_{primary_name}.png")
        generic_plotting(output_data,
                         x=dates,
                         title=title,
                         saveFig=True,
                         filename=filename,
                         legend=legend,
                         ylabel='Difference Ratio')

    if progress_bar is not None:
        progress_bar.uptick(increment=0.2)

    return r_strength, sector_bench
コード例 #5
0
def rate_of_change_oscillator(fund: pd.DataFrame,
                              periods: list = None,
                              **kwargs) -> dict:
    """Rate of Change Oscillator

    Arguments:
        fund {pd.DataFrame} -- fund dataset

    Keyword Arguments:
        periods {list} -- lookback periods for ROC (default: {[10, 20, 40]})

    Optional Args:
        plot_output {bool} -- (default: {True})
        name {str} -- (default: {''})
        views {str} -- (default: {''})
        progress_bar {ProgressBar} -- (default: {None})

    Returns:
        dict -- roc data object
    """
    # BUG FIX: the default was the mutable literal [10, 20, 40], shared
    # across all calls; use the None-sentinel idiom instead.
    if periods is None:
        periods = [10, 20, 40]

    plot_output = kwargs.get('plot_output', True)
    name = kwargs.get('name', '')
    views = kwargs.get('views', '')
    p_bar = kwargs.get('progress_bar')

    roc = dict()

    # One ROC signal per lookback (short / medium / long).
    tshort = roc_signal(fund, periods[0])
    if p_bar is not None:
        p_bar.uptick(increment=0.1)

    tmed = roc_signal(fund, periods[1])
    if p_bar is not None:
        p_bar.uptick(increment=0.1)

    tlong = roc_signal(fund, periods[2])
    if p_bar is not None:
        p_bar.uptick(increment=0.1)

    roc['tabular'] = {'short': tshort, 'medium': tmed, 'long': tlong}
    roc['short'] = periods[0]
    roc['medium'] = periods[1]
    roc['long'] = periods[2]

    # Align each signal's x-axis to compensate for its lookback offset.
    tsx, ts2 = adjust_signals(fund, tshort, offset=periods[0])
    tmx, tm2 = adjust_signals(fund, tmed, offset=periods[1])
    tlx, tl2 = adjust_signals(fund, tlong, offset=periods[2])
    if p_bar is not None:
        p_bar.uptick(increment=0.2)

    name2 = INDEXES.get(name, name)
    title = f"{name2} - Rate of Change Oscillator"
    plots = [ts2, tm2, tl2]
    xs = [tsx, tmx, tlx]

    # Same legend used for every plot; build it once.
    roc_legend = [
        f'ROC-{periods[0]}', f'ROC-{periods[1]}', f'ROC-{periods[2]}'
    ]

    if plot_output:
        dual_plotting(fund['Close'], [tshort, tmed, tlong],
                      'Price',
                      'Rate of Change',
                      title=title,
                      legend=roc_legend)
        generic_plotting(plots,
                         x=xs,
                         title=title,
                         legend=roc_legend)

    else:
        # NOTE(review): other save paths in this module end with '.png';
        # this one intentionally has no extension? — confirm.
        filename = os.path.join(name, views, f"rate_of_change_{name}")
        dual_plotting(fund['Close'], [tshort, tmed, tlong],
                      'Price',
                      'Rate of Change',
                      title=title,
                      legend=roc_legend,
                      saveFig=True,
                      filename=filename)

    if p_bar is not None:
        p_bar.uptick(increment=0.1)

    roc = roc_metrics(fund,
                      roc,
                      plot_output=plot_output,
                      name=name,
                      views=views,
                      p_bar=p_bar)

    roc['length_of_data'] = len(roc['tabular']['short'])
    roc['type'] = 'oscillator'

    if p_bar is not None:
        p_bar.uptick(increment=0.1)

    return roc
コード例 #6
0
def find_resistance_support_lines(data: pd.DataFrame, **kwargs) -> dict:
    """Find Resistance / Support Lines

    Arguments:
        data {pd.DataFrame} -- fund historical data

    Optional Args:
        name {str} -- name of fund, primarily for plotting (default: {''})
        plot_output {bool} -- True to render plot in realtime (default: {True})
        timeframes {list} -- time windows for feature discovery (default: {[13, 21, 34, 55]})
        progress_bar {ProgressBar} -- (default: {None})
        view {str} -- directory of plots (default: {''})

    Returns:
        dict -- contains all trendline information
    """
    name = kwargs.get('name', '')
    plot_output = kwargs.get('plot_output', True)
    timeframes = kwargs.get('timeframes', [13, 21, 34, 55])
    progress_bar = kwargs.get('progress_bar', None)
    view = kwargs.get('view', '')

    resist_support_lines = {}
    resist_support_lines['support'] = {}
    resist_support_lines['resistance'] = {}

    # Half the progress budget is spread evenly across the timeframes.
    increment = 0.5 / (float(len(timeframes)))

    support = {}
    resistance = {}
    for time in timeframes:
        support[str(time)] = {}
        x, y = find_points(data, line_type='support',
                           timeframe=time, filter_type='windowed')
        support[str(time)]['x'] = x
        support[str(time)]['y'] = y
        # NOTE(review): the entire accumulated dict (all timeframes so far)
        # is passed on each pass — confirm this is the intended behavior.
        sorted_support = sort_and_group(support)
        resist_support_lines['support'][str(
            time)] = cluster_notables(sorted_support, data)

        resistance[str(time)] = {}
        x2, y2 = find_points(data, line_type='resistance', timeframe=time)
        resistance[str(time)]['x'] = x2
        resistance[str(time)]['y'] = y2
        sorted_resistance = sort_and_group(resistance)
        resist_support_lines['resistance'][str(
            time)] = cluster_notables(sorted_resistance, data)

        if progress_bar is not None:
            progress_bar.uptick(increment=increment)

    # Plot content is derived from the longest timeframe only.
    Xs, Ys, Xr, Yr = get_plot_content(
        data, resist_support_lines, selected_timeframe=str(timeframes[-1]))

    if progress_bar is not None:
        progress_bar.uptick(increment=0.2)

    Xc, Yc = res_sup_unions(Yr, Xr, Ys, Xs)
    # Odd list behavior when no res/sup lines drawn on appends, so if-else to fix
    if len(Yc) > 0:
        Xp = Xc.copy()
        Xp2 = dates_convert_from_index(data, Xp)
        Yp = Yc.copy()
        Xp2.append(data.index)
        Yp.append(remove_dates_from_close(data))
    else:
        Xp2 = data.index
        Yp = [remove_dates_from_close(data)]
    c = colorize_plots(len(Yp), primary_plot_index=len(Yp)-1)

    if progress_bar is not None:
        progress_bar.uptick(increment=0.1)

    name2 = INDEXES.get(name, name)
    if plot_output:
        generic_plotting(Yp, x=Xp2, colors=c,
                         title=f'{name2} Major Resistance & Support')
    else:
        # CONSISTENCY FIX: build the path with os.path.join like every other
        # plot-saving function in this module, not manual '/' joining.
        filename = os.path.join(name, view, f"resist_support_{name}.png")
        generic_plotting(
            Yp, x=Xp2, colors=c, title=f'{name2} Major Resistance & Support',
            saveFig=True, filename=filename)

    if progress_bar is not None:
        progress_bar.uptick(increment=0.1)

    analysis = detailed_analysis([Yr, Ys, Yc], data, key_args={'Colors': c})
    if progress_bar is not None:
        progress_bar.uptick(increment=0.1)

    analysis['type'] = 'trend'

    return analysis
コード例 #7
0
def bond_composite_index(config: dict, **kwargs):
    """Bond Composite Index (BCI)

    Arguments:
        config {dict} -- controlling config dictionary

    Optional Args:
        plot_output {bool} -- True to render plot in realtime (default: {True})
        clock {float} -- time for prog_bar (default: {None})
    """
    plot_output = kwargs.get('plot_output', True)
    clock = kwargs.get('clock')

    period = config['period']
    properties = config['properties']
    plots = []
    legend = []
    dates = None

    # Validate each index key is set to True in the --core file
    if properties is not None and 'Indexes' in properties:
        props = properties['Indexes']

        # The three bond types follow an identical pipeline; iterate over
        # them instead of three copy-pasted branches (the original code).
        for bond_type in ('Treasury', 'Corporate', 'International'):
            # `== True` preserved deliberately: the config flag must be an
            # explicit boolean True, not merely truthy.
            if props.get(f"{bond_type} Bond") == True:
                data, sectors, index_type, m_data = metrics_initializer(
                    period=period, bond_type=bond_type)
                if m_data:
                    _, data, dates = composite_index(data, sectors, m_data,
                                                     plot_output=plot_output,
                                                     bond_type=bond_type,
                                                     index_type=index_type,
                                                     clock=clock)
                    plots.append(data)
                    legend.append(bond_type)

        if len(plots) > 0:
            if plot_output:
                generic_plotting(
                    plots, x=dates, title='Bond Composite Indexes',
                    legend=legend, ylabel='Normalized Price')
            else:
                filename = "combined_BCI.png"
                generic_plotting(
                    plots, x=dates, title='Bond Composite Indexes',
                    legend=legend, saveFig=True, filename=filename,
                    ylabel='Normalized Price')
コード例 #8
0
def generate_hull_signal(position: pd.DataFrame, **kwargs) -> list:
    """Generate Hull Signal

    Similar to triple moving average, this produces 3 period hull signals

    Arguments:
        position {pd.DataFrame} -- fund dataset

    Optional Args:
        period {list} -- list of ints for 3 lookback periods (default: {9, 16, 36})
        plot_output {bool} -- (default: {True})
        name {str} -- (default: {''})
        p_bar {ProgressBar} -- (default: {None})
        view {str} -- directory of plots (default: {''})

    Returns:
        list -- hull data object
    """
    period = kwargs.get('period', [9, 16, 36])
    plot_output = kwargs.get('plot_output', True)
    name = kwargs.get('name', '')
    p_bar = kwargs.get('p_bar')
    view = kwargs.get('view', '')

    # Record the lookback used for each horizon alongside the signals.
    hull = {
        'short': {'period': period[0]},
        'medium': {'period': period[1]},
        'long': {'period': period[2]},
        'tabular': {}
    }

    plots = []
    for lookback in period:
        # HMA(n) = WMA(2 * WMA(n/2) - WMA(n), sqrt(n))
        half_wma = weighted_moving_avg(position, int(lookback / 2))
        full_wma = weighted_moving_avg(position, lookback)

        raw_hull = [(2.0 * value) - full_wma[idx]
                    for idx, value in enumerate(half_wma)]

        smooth_period = int(np.round(np.sqrt(float(lookback)), 0))
        hma = weighted_moving_avg(raw_hull, smooth_period, data_type='list')
        plots.append(hma.copy())

        if p_bar is not None:
            p_bar.uptick(increment=0.1)

    hull['tabular'] = {
        'short': plots[0],
        'medium': plots[1],
        'long': plots[2]
    }

    label = INDEXES.get(name, name)
    title = label + ' - Hull Moving Averages'
    legend = ['Price', 'HMA-short', 'HMA-medium', 'HMA-long']

    if plot_output:
        generic_plotting([position['Close'], plots[0], plots[1], plots[2]],
                         legend=legend, title=title)
    else:
        filename = os.path.join(name, view, f"hull_moving_average_{name}.png")
        generic_plotting([position['Close'], plots[0], plots[1], plots[2]],
                         legend=legend, title=title, saveFig=True,
                         filename=filename)

    if p_bar is not None:
        p_bar.uptick(increment=0.2)

    return hull
コード例 #9
0
def composite_correlation(data: dict,
                          sectors: list,
                          progress_bar=None,
                          plot_output=True) -> dict:
    """Composite Correlation

    Betas and r-squared for 2 time periods for each sector (full, 1/2 time); plot of r-squared
    vs. S&P500 for last 50 or 100 days for each sector.

    Arguments:
        data {dict} -- data object
        sectors {list} -- list of sectors

    Keyword Arguments:
        progress_bar {ProgressBar} -- (default: {None})
        plot_output {bool} -- (default: {True})

    Returns:
        dict -- correlation dictionary
    """
    DIVISOR = 5
    correlations = {}

    if '^GSPC' in data.keys():
        tot_len = len(data['^GSPC']['Close'])
        # Rolling window: 1/5 of the dataset, capped at 100 bars.
        start_pt = int(np.floor(tot_len / DIVISOR))

        if start_pt > 100:
            start_pt = 100

        corrs = {}
        dates = data['^GSPC'].index[start_pt:tot_len]
        net_correlation = [0.0] * (tot_len - start_pt)

        # Progress ticks are batched: one uptick per DIVISOR inner iterations.
        DIVISOR = 10.0
        increment = float(len(sectors)) / (float(tot_len - start_pt) /
                                           DIVISOR * float(len(sectors)))

        counter = 0
        for sector in sectors:
            correlations[sector] = simple_beta_rsq(
                data[sector],
                data['^GSPC'],
                recent_period=[int(np.round(tot_len / 2, 0)), tot_len])

            corrs[sector] = []
            for i in range(start_pt, tot_len):

                _, rsqd = beta_comparison_list(
                    data[sector]['Close'][i - start_pt:i],
                    data['^GSPC']['Close'][i - start_pt:i])

                corrs[sector].append(rsqd)
                net_correlation[i - start_pt] += rsqd

                counter += 1
                if counter == DIVISOR:
                    # BUG FIX: progress_bar defaults to None; the unguarded
                    # call raised AttributeError. Guard it like every other
                    # function in this module does.
                    if progress_bar is not None:
                        progress_bar.uptick(increment=increment)
                    counter = 0

        plots = [corrs[x] for x in corrs.keys()]
        legend = [x for x in corrs.keys()]

        generic_plotting(plots,
                         x=dates,
                         title='MCI Correlations',
                         legend=legend,
                         saveFig=(not plot_output),
                         filename='MCI_correlations.png')

        if progress_bar is not None:
            progress_bar.uptick()

        # Normalize net correlation to its peak value.
        # NOTE(review): divides by max(net_correlation) — a zero max would
        # raise ZeroDivisionError; confirm inputs guarantee a positive max.
        max_ = np.max(net_correlation)
        net_correlation = [x / max_ for x in net_correlation]

        legend = ['Net Correlation', 'S&P500']
        dual_plotting(net_correlation,
                      data['^GSPC']['Close'][start_pt:tot_len],
                      x=dates,
                      y1_label=legend[0],
                      y2_label=legend[1],
                      title='MCI Net Correlation',
                      saveFig=(not plot_output),
                      filename='MCI_net_correlation.png')

        if progress_bar is not None:
            progress_bar.uptick()

    return correlations
コード例 #10
0
def type_composite_index(**kwargs) -> list:
    """Type Composite Index (TCI)

    Similar to MCI, TCI compares broader market types (sensitive, cyclical, and defensive)

    Optional Args:
        config {dict} -- controlling config dictionary (default: {None})
        plot_output {bool} -- True to render plot in realtime (default: {True})
        period {str / list} -- time period for data (e.g. '2y') (default: {None})
        clock {float} -- time for prog_bar (default: {None})
        data {pd.DataFrame} -- fund datasets (default: {None})
        sectors {list} -- list of sectors (default: {None})

    Returns:
        tuple -- (tci information dict, data, sectors); ({}, None, None) on
                 failure or when the 'Type Sector' index is not enabled
    """
    config = kwargs.get('config')
    period = kwargs.get('period')
    plot_output = kwargs.get('plot_output', True)
    clock = kwargs.get('clock')
    data = kwargs.get('data')
    sectors = kwargs.get('sectors')

    if config is not None:
        period = config['period']
        properties = config['properties']

    elif period is None:
        # BUG FIX: this branch fires when NEITHER config NOR period was
        # supplied; the old message wrongly claimed both were provided, and
        # returned a bare {} while every other exit path returns a 3-tuple.
        print(
            f"{ERROR_COLOR}ERROR: neither config nor period provided " +
            f"for type_composite_index{NORMAL}")
        return {}, None, None

    else:
        # Support for release 1 versions: synthesize the expected properties
        # structure from the bare period argument.
        properties = dict()
        properties['Indexes'] = {}
        properties['Indexes']['Type Sector'] = True

    #  Validate each index key is set to True in the --core file
    if properties is not None:
        if 'Indexes' in properties.keys():
            props = properties['Indexes']
            if 'Type Sector' in props.keys():
                # `== True` preserved deliberately: the flag must be an
                # explicit boolean True, not merely truthy.
                if props['Type Sector'] == True:

                    m_data = get_metrics_content()
                    if data is None or sectors is None:
                        data, sectors = metrics_initializer(m_data,
                                                            period='2y')

                    if data:
                        p = ProgressBar(19,
                                        name='Type Composite Index',
                                        offset=clock)
                        p.start()

                        tci = dict()

                        # Clustered oscillator signal per sector.
                        composite = {}
                        for sect in sectors:
                            cluster = cluster_oscs(data[sect],
                                                   plot_output=False,
                                                   function='market',
                                                   wma=False,
                                                   progress_bar=p)

                            graph = cluster['tabular']
                            composite[sect] = graph

                        # Cluster composites per market type.
                        defensive = type_composites(composite,
                                                    m_data,
                                                    type_type='Defensive')
                        p.uptick()

                        sensitive = type_composites(composite,
                                                    m_data,
                                                    type_type='Sensitive')
                        p.uptick()

                        cyclical = type_composites(composite,
                                                   m_data,
                                                   type_type='Cyclical')
                        p.uptick()

                        # Weighted index value per market type.
                        d_val = weighted_signals(data,
                                                 m_data,
                                                 type_type='Defensive')
                        p.uptick()

                        s_val = weighted_signals(data,
                                                 m_data,
                                                 type_type='Sensitive')
                        p.uptick()

                        c_val = weighted_signals(data,
                                                 m_data,
                                                 type_type='Cyclical')
                        p.uptick()

                        # Smooth each index with a 3-period windowed average.
                        d_val = windowed_moving_avg(d_val, 3, data_type='list')
                        c_val = windowed_moving_avg(c_val, 3, data_type='list')
                        s_val = windowed_moving_avg(s_val, 3, data_type='list')
                        p.uptick()

                        tci['defensive'] = {
                            "tabular": d_val,
                            "clusters": defensive
                        }
                        tci['sensitive'] = {
                            "tabular": s_val,
                            "clusters": sensitive
                        }
                        tci['cyclical'] = {
                            "tabular": c_val,
                            "clusters": cyclical
                        }

                        # NOTE(review): x-axis dates are taken from the 'VGT'
                        # dataset — assumes VGT is always present; confirm.
                        dates = data['VGT'].index
                        if plot_output:
                            dual_plotting(y1=d_val,
                                          y2=defensive,
                                          y1_label='Defensive Index',
                                          y2_label='Clustered Osc',
                                          title='Defensive Index',
                                          x=dates)
                            dual_plotting(y1=s_val,
                                          y2=sensitive,
                                          y1_label='Sensitive Index',
                                          y2_label='Clustered Osc',
                                          title='Sensitive Index',
                                          x=dates)
                            dual_plotting(y1=c_val,
                                          y2=cyclical,
                                          y1_label='Cyclical Index',
                                          y2_label='Clustered Osc',
                                          title='Cyclical Index',
                                          x=dates)
                            generic_plotting(
                                [d_val, s_val, c_val],
                                legend=['Defensive', 'Sensitive', 'Cyclical'],
                                title='Type Indexes',
                                x=dates)
                        else:
                            generic_plotting(
                                [d_val, s_val, c_val],
                                legend=['Defensive', 'Sensitive', 'Cyclical'],
                                title='Type Indexes',
                                x=dates,
                                saveFig=True,
                                ylabel='Normalized "Price"',
                                filename='tci.png')

                        p.end()
                        return tci, data, sectors
    return {}, None, None
コード例 #11
0
def nasit_generation_function(config: dict, print_only=False):
    """Generate NASIT funds defined in 'nasit.json'.

    Loads fund definitions from 'nasit.json', builds a price-level series
    and a returns series for each fund, then either prints the latest price
    and day-over-day change per fund (print_only=True) or exports all series
    to 'output/NASIT.csv' and renders plots.

    Arguments:
        config {dict} -- configuration object passed through to nasit_get_data

    Keyword Arguments:
        print_only {bool} -- print a summary instead of exporting/plotting
                             (default: {False})
    """
    print("Generating Nasit funds...")
    print("")

    nasit_file = 'nasit.json'
    if not os.path.exists(nasit_file):
        print(
            f"{WARNING}WARNING: 'nasit.json' not found. Exiting...{NORMAL}")
        return

    # The context manager closes the file; no explicit close() is needed.
    with open(nasit_file) as nasit_handle:
        nasit = json.load(nasit_handle)

    fund_list = nasit.get('Funds', [])
    nasit_funds = {}
    for fund in fund_list:
        t_data, has_cash = nasit_get_data(fund, config)
        ticker = fund.get('ticker')
        nasit_funds[ticker] = nasit_extraction(
            fund, t_data, has_cash=has_cash)
        # "<ticker>_ret" holds the same fund extracted as returns, not price.
        nasit_funds[f"{ticker}_ret"] = nasit_extraction(
            fund, t_data, has_cash=has_cash, by_price=False)

    if print_only:
        for key, series in nasit_funds.items():
            if "_ret" in key:
                continue
            price = np.round(series[-1], 2)
            change = np.round(price - series[-2], 2)
            changep = np.round(
                (price - series[-2]) / series[-2] * 100.0, 3)

            if change > 0.0:
                color = UP_COLOR
            elif change < 0.0:
                color = DOWN_COLOR
            else:
                color = NORMAL

            print("")
            print(
                f"{TICKER}{key}{color}   ${price} (${change}, {changep}%){NORMAL}")
        print("")
        print("")
        return

    # NOTE(review): assumes the 'output' directory already exists -- confirm.
    df = pd.DataFrame(nasit_funds)
    out_file = 'output/NASIT.csv'
    df.to_csv(out_file)

    plotable = []
    plotable2 = []
    names = []
    names2 = []

    # Split the price-level series from the "<ticker>_ret" returns series.
    for key, series in nasit_funds.items():
        if '_ret' not in key:
            plotable.append(series)
            names.append(key)
        else:
            plotable2.append(series)
            names2.append(key)

    generic_plotting(plotable, legend=names, title='NASIT Passives')
    generic_plotting(plotable2, legend=names2,
                     title='NASIT Passives [Returns]')
# コード例 #12 ("Code example #12" — scraped-snippet separator)
# 0
def _regression_trend_segment(x_vals, y_vals, indexes, scale_change):
    """Fit extreme trendlines to one segment by one-sided point stripping.

    Repeatedly regresses the segment and keeps only points on one side of
    the fit until at most 4 remain, then fits the final line. Done once per
    side (above the fit, then below it).

    Returns:
        list -- two (content, line) tuples; `content` holds slope/intercept/
                angle, `line` is the final fit evaluated over all `indexes`.
    """
    results = []
    for side in range(2):
        data = pd.DataFrame.from_dict({'value': list(y_vals),
                                       'x': list(x_vals)})

        # Strip points until only the extreme ones defining the line remain.
        while len(data['x']) > 4:
            reg = linregress(data['x'], data['value'])
            fit = reg[0] * data['x'] + reg[1]
            if side == 0:
                data = data.loc[data['value'] > fit]
            else:
                data = data.loc[data['value'] < fit]

        reg = linregress(data['x'], data['value'])
        content = {'slope': reg[0], 'intercept': reg[1]}
        # Angle is computed in "plot space", hence the x/y scale correction;
        # negative slopes are mapped into the (90, 180] degree range.
        angle = np.arctan(reg[0] * scale_change) / np.pi * 180.0
        if reg[0] < 0.0:
            angle += 180.0
        content['angle'] = angle

        line = [reg[0] * ind + reg[1] for ind in indexes]
        results.append((content, line))

    return results


def get_trendlines_regression(signal: list, **kwargs) -> dict:
    """Get Trendlines Regression

    A regression-only based method of generating trendlines (w/o use of local
    minima and maxima).

    Arguments:
        signal {list} -- signal of which to find a trend (can be anything)

    Optional Args:
        iterations {int} -- number of passes through trendline creation with
                            "divisors" (default: length of the divisor set)
        threshold {float} -- acceptable ratio a trendline can be off and still
                             counted in current plot (default: {0.1})
        dates {list} -- typically DataFrame.index (default: {None})
        indicator {str} -- for plot name, indicator trend analyzed (default: {''})
        plot_output {bool} -- (default: {True})
        name {str} -- (default: {''})
        views {str} -- (default: {''})

    Returns:
        dict -- trendline content
    """
    # BUGFIX: the original referenced DIVISORS without ever initializing it,
    # raising NameError whenever resources/config.json was absent or its
    # 'divisors' list was shorter than the computed range index.
    # TODO(review): confirm the preferred fallback divisor set.
    divisor_set = [3, 5, 7, 9, 11]

    config_path = os.path.join("resources", "config.json")
    if os.path.exists(config_path):
        # The context manager closes the file; no explicit close() is needed.
        with open(config_path, 'r') as cpf:
            c_data = json.load(cpf)

        ranges = c_data.get('trendlines', {}).get('divisors',
                                                  {}).get('ranges', [])
        # 'ranged' counts how many configured length thresholds the signal
        # exceeds; it selects which configured divisor set applies.
        ranged = 0
        for rg in ranges:
            if len(signal) > rg:
                ranged += 1

        divs = c_data.get('trendlines', {}).get('divisors', {}).get('divisors')
        if divs is not None and len(divs) > ranged:
            divisor_set = divs[ranged]

    iterations = kwargs.get('iterations', len(divisor_set))
    threshold = kwargs.get('threshold', 0.1)
    dates = kwargs.get('dates')
    indicator = kwargs.get('indicator', '')
    plot_output = kwargs.get('plot_output', True)
    name = kwargs.get('name', '')
    views = kwargs.get('views', '')

    indexes = list(range(len(signal)))

    iterations = min(iterations, len(divisor_set))
    divisors = divisor_set[0:iterations]

    lines = []
    x_s = []
    t_line_content = []
    line_id = 0

    # Scale correction so trendline angles reflect the plot's aspect ratio.
    y_max = max(signal) - min(signal)
    x_max = len(signal)
    scale_change = float(x_max) / float(y_max)

    for div in divisors:
        period = int(len(signal) / div)

        # Pass 1: non-overlapping segments covering the whole signal (the
        # last segment absorbs the remainder).
        for i in range(div):
            if i == div - 1:
                seg_y = signal[period * i:len(signal)]
                seg_x = indexes[period * i:len(signal)]
            else:
                seg_y = signal[period * i:period * (i + 1)]
                seg_x = indexes[period * i:period * (i + 1)]

            for content, line in _regression_trend_segment(
                    seg_x, seg_y, indexes, scale_change):
                # NOTE(review): this pass filters without the 'threshold'
                # kwarg, unlike the sliding pass below; preserved as-is to
                # keep behavior -- confirm whether that was intentional.
                line_corrected, x_corrected = filter_nearest_to_signal(
                    signal, indexes.copy(), line)

                if len(x_corrected) > 0:
                    content['length'] = len(x_corrected)
                    content['id'] = line_id
                    line_id += 1

                    lines.append(line_corrected.copy())
                    x_s.append(x_corrected.copy())
                    t_line_content.append(content)

        # Pass 2: sliding windows of width 'period', stepped by 2.
        for i in range(period, len(signal), 2):
            seg_y = signal[i - period:i]
            seg_x = indexes[i - period:i]

            for content, line in _regression_trend_segment(
                    seg_x, seg_y, indexes, scale_change):
                line_corrected, x_corrected = filter_nearest_to_signal(
                    signal, indexes.copy(), line, threshold=threshold)

                if len(x_corrected) > 0:
                    content['length'] = len(x_corrected)
                    content['id'] = line_id
                    line_id += 1

                    lines.append(line_corrected.copy())
                    x_s.append(x_corrected.copy())
                    t_line_content.append(content)

    # Handle overload of lines (consolidate) with progressively looser
    # thresholds: bucket by slope, then merge similar intercepts by line
    # extension/combination and slope averaging, tracking line 'id's so the
    # plot/x_plot corrections stay aligned.
    t_line_content, lines, x_s = consolidate_lines(t_line_content, lines, x_s,
                                                   signal)
    t_line_content, lines, x_s = consolidate_lines(t_line_content, lines, x_s,
                                                   signal, thresh=0.2)
    t_line_content, lines, x_s = consolidate_lines(t_line_content, lines, x_s,
                                                   signal, thresh=0.3)

    # The raw signal is plotted first, followed by every surviving trendline.
    plots = [signal] + lines
    x_plots = [list(range(len(signal)))] + x_s

    if dates is not None:
        # Map integer indexes to actual dates for plotting.
        x_plots = [[dates[i] for i in xps] for xps in x_plots]

    title = f"{indicator.capitalize()} Trendlines"
    if plot_output:
        generic_plotting(plots, x=x_plots, title=title)
    else:
        filename = os.path.join(name, views,
                                f"{indicator}_trendlines_{name}.png")
        generic_plotting(plots,
                         x=x_plots,
                         title=title,
                         filename=filename,
                         saveFig=True)

    # NOTE(review): trendline content is computed but never returned -- the
    # original returned an empty dict; preserved for caller compatibility.
    trends = dict()
    return trends
# コード例 #13 ("Code example #13" — scraped-snippet separator)
# 0
def get_trendlines(fund: pd.DataFrame, **kwargs) -> dict:
    """Get Trendlines

    Arguments:
        fund {pd.DataFrame} -- fund historical data

    Optional Args:
        name {str} -- name of fund, primarily for plotting (default: {''})
        plot_output {bool} -- True to render plot in realtime (default: {True})
        interval {list} -- list of windowed filter time periods (default: {[4, 8, 16, 32]})
        progress_bar {ProgressBar} -- (default: {None})
        sub_name {str} -- file extension within 'name' directory (default: {name})
        view {str} -- directory of plots (default: {''})
        meta {dict} -- 'metadata' object for fund (default: {None})
        out_suppress {bool} -- if True, skips plotting (default: {False})
        trend_window {list} -- line time windows (default: {[163, 91, 56, 27]})

    Returns:
        trends {dict} -- contains all trend lines determined by algorithm
    """
    name = kwargs.get('name', '')
    plot_output = kwargs.get('plot_output', True)
    interval = kwargs.get('interval', [4, 8, 16, 32])
    progress_bar = kwargs.get('progress_bar', None)
    sub_name = kwargs.get('sub_name', f"trendline_{name}")
    view = kwargs.get('view', '')
    meta = kwargs.get('meta')
    out_suppress = kwargs.get('out_suppress', False)
    trend_window = kwargs.get('trend_window', [163, 91, 56, 27])

    # Not ideal to ignore warnings, but these are handled already by
    # scipy/numpy so... eh...
    warnings.filterwarnings("ignore", category=RuntimeWarning)

    trends = dict()

    mins_y = []
    mins_x = []
    maxes_y = []
    maxes_x = []
    all_x = []

    # Volatility quotient: default 6% unless supplied via metadata.
    vq = 0.06
    if meta is not None:
        vol = meta.get('volatility', {}).get('VQ')
        if vol is not None:
            vq = vol / 100.0

    increment = 0.7 / (float(len(interval)) * 3)

    for i, period in enumerate(interval):
        ma_size = period

        # Weight the moving average slightly more heavily for later
        # (longer) filter periods.
        weight_strength = 2.0 + (0.1 * float(i))
        ma = windowed_moving_avg(fund['Close'],
                                 interval=ma_size,
                                 weight_strength=weight_strength,
                                 data_type='list',
                                 filter_type='exponential')
        ex = find_filtered_local_extrema(ma)
        r = reconstruct_extrema(fund,
                                extrema=ex,
                                ma_size=ma_size,
                                ma_type='windowed')

        # Cleanse data sample for duplicates and errors.
        r = remove_duplicates(r, method='point')

        # Collect unique extrema points across all filter periods.
        for y in r['min']:
            if y[0] not in mins_x:
                mins_x.append(y[0])
                mins_y.append(y[1])
            if y[0] not in all_x:
                all_x.append(y[0])
        for y in r['max']:
            if y[0] not in maxes_x:
                maxes_x.append(y[0])
                maxes_y.append(y[1])
            if y[0] not in all_x:
                all_x.append(y[0])

        if progress_bar is not None:
            progress_bar.uptick(increment=increment)

    # Sort extrema by x-position while keeping (x, y) pairs aligned.
    zipped_min = sorted(zip(mins_x, mins_y), key=lambda p: p[0])
    mins_x = [p[0] for p in zipped_min]
    mins_y = [p[1] for p in zipped_min]

    zipped_max = sorted(zip(maxes_x, maxes_y), key=lambda p: p[0])
    maxes_x = [p[0] for p in zipped_max]
    maxes_y = [p[1] for p in zipped_max]

    long_term = trend_window[0]
    intermediate_term = trend_window[1]
    short_term = trend_window[2]
    near_term = trend_window[3]

    extrema = [mins_x, mins_y, maxes_x, maxes_y, all_x]
    # One (window, color, label) spec per trend horizon; this replaces four
    # copy-pasted call+append blocks with identical structure.
    term_specs = [(long_term, 'blue', 'long'),
                  (intermediate_term, 'green', 'intermediate'),
                  (short_term, 'orange', 'short'),
                  (near_term, 'red', 'near')]

    X = []
    Y = []
    C = []
    L = []

    for term_interval, color, label in term_specs:
        xs, ys = get_lines_from_period(fund,
                                       extrema,
                                       interval=term_interval,
                                       vq=vq)
        for j, x in enumerate(xs):
            X.append(x)
            Y.append(ys[j])
            C.append(color)
            L.append(label)

    if progress_bar is not None:
        # Same total progress as the original's two increment*4.0 upticks.
        progress_bar.uptick(increment=increment * 8.0)

    analysis_list = generate_analysis(fund,
                                      x_list=X,
                                      y_list=Y,
                                      len_list=L,
                                      color_list=C)

    if progress_bar is not None:
        progress_bar.uptick(increment=0.1)

    X = dates_convert_from_index(fund, X)

    # Append the fund price itself as the final (black) plotted series.
    X.append(fund.index)
    Y.append(fund['Close'])
    C.append('black')

    name2 = INDEXES.get(name, name)

    if not out_suppress:
        try:
            title = f"{name2} Trend Lines for {near_term}, {short_term}, " + \
                f"{intermediate_term}, and {long_term} Periods"
            if plot_output:
                generic_plotting(Y, x=X, colors=C, title=title)
            else:
                filename = os.path.join(name, view, f"{sub_name}.png")
                generic_plotting(Y,
                                 x=X,
                                 colors=C,
                                 title=title,
                                 saveFig=True,
                                 filename=filename)

        # BUGFIX: was a bare 'except:', which also swallows SystemExit and
        # KeyboardInterrupt; narrowed while keeping best-effort plotting.
        except Exception:
            print(
                f"{WARNING}Warning: plot failed to generate in trends.get_trendlines.{NORMAL}"
            )

    if progress_bar is not None:
        progress_bar.uptick(increment=0.2)

    trends['trendlines'] = analysis_list

    current = []
    metrics = []

    # Summarize only the trendlines still active at the latest period.
    for trend in analysis_list:
        if trend['current']:
            current.append(trend)
            met = {f"{trend.get('term')} term": trend.get('type')}
            met['periods'] = trend.get('length')
            metrics.append(met)

    trends['current'] = current
    trends['metrics'] = metrics
    trends['type'] = 'trend'

    return trends