Example #1
def make_pipeline():

    return Pipeline(
        columns={
            # Your pipeline columns go here.
        },
        screen=QTradableStocksUS())
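For context, here is a minimal sketch (not part of the original example) of how such a pipeline is typically attached in initialize and read back each trading day; the pipeline name 'my_pipeline' is illustrative.

from quantopian.algorithm import attach_pipeline, pipeline_output


def initialize(context):
    # Register the pipeline under an arbitrary name so the engine computes it daily.
    attach_pipeline(make_pipeline(), 'my_pipeline')


def before_trading_start(context, data):
    # Retrieve the day's pipeline output as a pandas DataFrame indexed by asset.
    context.pipeline_data = pipeline_output('my_pipeline')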
Example #2
def make_pipeline():

    base_universe = QTradableStocksUS()

    sentiment_score = SimpleMovingAverage(
        inputs=[stocktwits.bull_minus_bear],
        window_length=3,
    )

    # Create filter for top 350 and bottom 350
    # assets based on their sentiment scores
    top_bottom_scores = (
        sentiment_score.top(350) | sentiment_score.bottom(350)
    )

    return Pipeline(
        columns={
            'sentiment_score': sentiment_score,
        },
        # Set screen as the intersection between our filter
        # and trading universe
        screen=(
            base_universe
            & top_bottom_scores
        )
    )
Example #3
def make_pipeline(context):
    ## symbol universe
    base_universe = QTradableStocksUS()  # alternatives: Q500US() or Q1500US()
    ## filters
    # Filter for primary share equities. IsPrimaryShare is a built-in filter.
    primary_share = IsPrimaryShare()
    # Equities listed as common stock (as opposed to, say, preferred stock).
    # 'ST00000001' indicates common stock.
    common_stock = morningstar.share_class_reference.security_type.latest.eq(
        'ST00000001')
    # Non-depositary receipts. Recall that the ~ operator inverts filters,
    # turning Trues into Falses and vice versa
    not_depositary = ~morningstar.share_class_reference.is_depositary_receipt.latest
    # Equities not trading over-the-counter.
    not_otc = ~morningstar.share_class_reference.exchange_id.latest.startswith(
        'OTC')
    # Not when-issued equities.
    not_wi = ~morningstar.share_class_reference.symbol.latest.endswith('.WI')
    # Equities without LP in their name. .matches() applies a regular expression.
    not_lp_name = ~morningstar.company_reference.standard_name.latest.matches(
        '.* L[. ]?P.?$')
    # Equities with a null value in the limited_partnership Morningstar fundamental field.
    not_lp_balance_sheet = morningstar.balance_sheet.limited_partnership.latest.isnull()
    # Equities whose most recent Morningstar market cap is not null have fundamental data and therefore are not ETFs.
    have_market_cap = morningstar.valuation.market_cap.latest.notnull()
    is_cyclical = SuperSector().eq(SuperSector.CYCLICAL)
    is_defensive = SuperSector().eq(SuperSector.DEFENSIVE)
    is_sensitive = SuperSector().eq(SuperSector.SENSITIVE)

    sector = Sector()
    close = USEquityPricing.close

    # For filter
    tradeable_stocks = (primary_share
                        & common_stock
                        & not_depositary
                        & not_otc
                        & not_wi
                        & not_lp_name
                        & not_lp_balance_sheet
                        & have_market_cap
                        & (is_cyclical | is_defensive | is_sensitive))
    dollar_volume = AverageDollarVolume(window_length=30)
    high_dollar_volume = dollar_volume.percentile_between(98, 100)

    myscreen = (base_universe & high_dollar_volume)  # & tradeable_stocks

    # Pipeline
    pipe = Pipeline(columns={
        'yesterday_close': close.latest,
        'day_before_yesterday_close': PrevClose(),
        'day_before_yesterday_volume': PrevVolume(),
        'morningstar_sector_code': sector,
    },
                    screen=myscreen)
    return pipe
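PrevClose() and PrevVolume() are referenced above but not defined in this snippet. A minimal sketch of what such CustomFactors could look like (the window length of 2 is an assumption inferred from the "day_before_yesterday" column names):

from quantopian.pipeline import CustomFactor
from quantopian.pipeline.data import USEquityPricing


class PrevClose(CustomFactor):
    # Close price from two trading days ago (the day before yesterday's close).
    inputs = [USEquityPricing.close]
    window_length = 2

    def compute(self, today, assets, out, close):
        out[:] = close[0]


class PrevVolume(CustomFactor):
    # Volume from two trading days ago.
    inputs = [USEquityPricing.volume]
    window_length = 2

    def compute(self, today, assets, out, volume):
        out[:] = volume[0]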
Example #4
def make_pipeline():
    """
    Create and return our pipeline.

    We break this piece of logic out into its own function to make it easier to
    test and modify in isolation.

    In particular, this function can be copy/pasted into research and run by itself.
    """

    # By appending .latest to the imported morningstar data, we get builtin Factors
    # so there's no need to define a CustomFactor
    value = Fundamentals.ebit.latest / Fundamentals.enterprise_value.latest
    quality = Fundamentals.roe.latest

    # Classify all securities by sector so that we can enforce sector neutrality later
    sector = Sector()

    # Screen out non-desirable securities by defining our universe.
    # Removes ADRs, OTCs, non-primary shares, LP, etc.
    # Also sets a minimum $500MM market cap filter and $5 price filter
    mkt_cap_filter = Fundamentals.market_cap.latest >= 500000000
    price_filter = USEquityPricing.close.latest >= 5
    universe = QTradableStocksUS() & price_filter & mkt_cap_filter

    # Construct a Factor representing the rank of each asset by our value and
    # quality metrics. We aggregate them together here using simple addition
    # after z-scoring them.
    combined_rank = (value.zscore() + quality.zscore())

    # Build Filters representing the top NUM_LONG_POSITIONS and bottom
    # NUM_SHORT_POSITIONS stocks by our combined ranking system.
    # We'll use these as our tradeable universe each day.
    longs = combined_rank.top(NUM_LONG_POSITIONS, mask=universe)
    shorts = combined_rank.bottom(NUM_SHORT_POSITIONS, mask=universe)

    # The final output of our pipeline should only include
    # the stocks selected by our long/short filters
    long_short_screen = (longs | shorts)

    # Create pipeline
    pipe = Pipeline(columns={
        'longs': longs,
        'shorts': shorts,
        'combined_rank': combined_rank,
        'quality': quality,
        'value': value,
        'sector': sector
    },
                    screen=long_short_screen)
    return pipe
Example #5
def make_pipeline(context):
    pipe = Pipeline()
    base_universe = QTradableStocksUS()
    dollar_volume = AverageDollarVolume(window_length=30)
    high_dollar_volume = dollar_volume.percentile_between(98, 100)

    close_day_before_yesterday = ValueDaybeforeYesterday(
        inputs=[USEquityPricing.close])
    volume_day_before_yesterday = ValueDaybeforeYesterday(
        inputs=[USEquityPricing.volume])
    pipe.add(close_day_before_yesterday, "close_day_before_yesterday")
    pipe.add(volume_day_before_yesterday, "volume_day_before_yesterday")

    my_screen = base_universe & high_dollar_volume
    pipe.set_screen(my_screen)
    return pipe
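ValueDaybeforeYesterday is likewise not defined in this snippet; a plausible generic implementation, with the window length inferred from the name, would be:

from quantopian.pipeline import CustomFactor


class ValueDaybeforeYesterday(CustomFactor):
    # For whatever column is passed via `inputs`, return the value from
    # two trading days ago (i.e. the day before yesterday's value).
    window_length = 2

    def compute(self, today, assets, out, values):
        out[:] = values[0]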
Example #6
def make_pipeline():

    base_universe = QTradableStocksUS()

    sentiment_score = SimpleMovingAverage(
        inputs=[stocktwits.bull_minus_bear],
        window_length=3,
    )

    return Pipeline(
        columns={
            'sentiment_score': sentiment_score,
        },
        screen=base_universe
    )
Example #7
def make_pipeline():

    not_near_earnings = ~((BusinessDaysUntilNextEarnings() <= 2) |
                          (BusinessDaysSincePreviousEarnings() <= 2))

    not_acq_tar = ~IsAnnouncedAcqTarget()

    universe = (QTradableStocksUS() & not_near_earnings & not_acq_tar)

    sentiment_score = SimpleMovingAverage(inputs=[stocktwits.bull_minus_bear],
                                          window_length=5,
                                          mask=universe)

    return Pipeline(columns={
        'sentiment_score': sentiment_score,
    },
                    screen=sentiment_score.notnull())
Example #8
def make_pipeline():
    cash_return = Fundamentals.cash_return.latest
    universe = QTradableStocksUS() & cash_return.notnull()

    ranking = cash_return.rank(mask=universe)

    longs = ranking.percentile_between(95, 100)
    shorts = ranking.percentile_between(1, 5)

    long_short_screen = (longs | shorts)

    pipe = Pipeline(columns={
        'longs': longs,
        'shorts': shorts,
    },
                    screen=long_short_screen)
    return pipe
Example #9
def make_pipeline():

    base_universe = QTradableStocksUS()


    # STEP TWO: CREATE A FILTER
    # sentiment_score is a simple moving average whose inputs come from
    # stocktwits.bull_minus_bear. There's a bit of magic in how this data works,
    # but note that it comes from "pipeline.data.psychsignal import stocktwits",
    # so it is a data set provided by the "psychsignal" library.

    sentiment_score = SimpleMovingAverage(
        inputs=[stocktwits.bull_minus_bear],
        window_length=3,
    )

    # Create a filter for the top 350 and bottom 350
    # assets based on their sentiment scores.
    # NOTE: the "|" operator here is OR and creates a UNION
    # of the .top(350) and .bottom(350) filters.

    top_bottom_scores = (
        sentiment_score.top(350) | sentiment_score.bottom(350)
    )
    
    # STEP THREE: RETURN THE PIPELINE, i.e. create the DataFrame definition and
    # specify the screening universe.
    # In place of "return Pipeline" it may be clearer to think
    # "return a pandas DataFrame definition".
    # What is Pipeline(...) doing? It defines the columns of a pandas DataFrame
    # and supplies the specific SCREEN we defined earlier: the intersection of
    # base_universe and top_bottom_scores (the union created above). So
    # Pipeline(...) takes these partial abstractions and runs a targeted analysis.
    
    return Pipeline(
        columns={
            'sentiment_score': sentiment_score,
        },
        # Set screen as the intersection between our filter
        # and trading universe
        screen=(
            base_universe
            & top_bottom_scores
        )
    )
Example #10
def make_pipeline():
    # Create a reference to our trading universe
    base_universe = QTradableStocksUS()

    # Get latest closing price
    close_price = USEquityPricing.close.latest

    # Calculate 3 day average of bull_minus_bear scores
    sentiment_score = SimpleMovingAverage(
        inputs=[stocktwits.bull_minus_bear],
        window_length=3,
    )

    # Return Pipeline containing close_price and
    # sentiment_score that has our trading universe as screen
    return Pipeline(columns={
        'close_price': close_price,
        'sentiment_score': sentiment_score,
    },
                    screen=base_universe)
Example #11
def make_pipeline():
    base_universe = QTradableStocksUS()
    sector = Sector()
    sector2 = morningstar.asset_classification.morningstar_sector_code.latest

    healthcare_sector = sector.eq(206)

    airline_sector = sector2.eq(31053108)

    sent = sentiment.sentiment_signal.latest

    # Use "&" (not Python's "and") to combine pipeline filters element-wise.
    longs = healthcare_sector & (sent > 3)

    shorts = airline_sector & (sent < 1)

    tradable_securities = (longs | shorts) & base_universe

    return Pipeline(columns={
        'longs': longs,
        'shorts': shorts
    },
                    screen=tradable_securities)
Example #12
def make_pipeline():

    # Latest p/e ratio.
    pe_ratio = Fundamentals.pe_ratio.latest

    # Number of business days since the fundamental data was updated. In this example,
    # we consider the p/e ratio to be fresh if it is at most 4 business days old.
    is_fresh = (BusinessDaysSincePreviousEvent(
        inputs=[Fundamentals.pe_ratio_asof_date]) <= 4)

    # Other indicators
    market_c = Fundamentals.market_cap.latest
    shares_o = Fundamentals.shares_outstanding.latest
    basic_e = Fundamentals.basic_eps.latest
    fcf = Fundamentals.free_cash_flow.latest

    # QTradableStocksUS is a pre-defined universe of liquid securities.
    universe = QTradableStocksUS() & is_fresh

    # Top 100 and bottom 100 stocks ranked by p/e ratio
    top_pe_stocks = pe_ratio.top(100, mask=universe)
    bottom_pe_stocks = pe_ratio.bottom(100, mask=universe)

    # Screen to include only securities tradable for the day
    securities_to_trade = (top_pe_stocks | bottom_pe_stocks)

    pipe = Pipeline(columns={
        'pe_ratio': pe_ratio,
        'longs': top_pe_stocks,
        'shorts': bottom_pe_stocks,
    },
                    screen=securities_to_trade)

    return pipe
Example #13
def initialize(context):
    # Universe Selection
    # ------------------
    base_universe = QTradableStocksUS()

    # From what remains, each week, take the top UNIVERSE_SIZE stocks by average dollar
    # volume traded.
    weekly_top_volume = (AverageDollarVolume(
        window_length=LIQUIDITY_LOOKBACK_LENGTH).top(
            UNIVERSE_SIZE, mask=base_universe).downsample('week_start'))
    # The final universe is the weekly top volume &-ed with the original base universe.
    # &-ing these is necessary because the top volume universe is calculated at the start
    # of each week, and an asset might fall out of the base universe during that week.
    universe = weekly_top_volume & base_universe

    # Alpha Generation
    # ----------------
    # Compute Z-scores of free cash flow yield and earnings yield.
    # Both of these are fundamental value measures.
    fcf_zscore = Fundamentals.fcf_yield.latest.zscore(mask=universe)
    yield_zscore = Fundamentals.earning_yield.latest.zscore(mask=universe)
    sentiment_zscore = psychsignal.stocktwits.bull_minus_bear.latest.zscore(
        mask=universe)

    # Alpha Combination
    # -----------------
    # Assign every asset a combined rank and center the values at 0.
    # For UNIVERSE_SIZE=500, the range of values should be roughly -250 to 250.
    combined_alpha = (fcf_zscore + yield_zscore +
                      sentiment_zscore).rank().demean()

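    # Shrink the estimated beta toward 1.0 (beta = 0.66 * regression_beta + 0.33)
    # to damp estimation noise from the 260-day regression against SPY (sid 8554).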
    beta = 0.66 * RollingLinearRegressionOfReturns(
        target=sid(8554),
        returns_length=5,
        regression_length=260,
        mask=combined_alpha.notnull() & Sector().notnull()).beta + 0.33 * 1.0

    # Schedule Tasks
    # --------------
    # Create and register a pipeline computing our combined alpha and a sector
    # code for every stock in our universe. We'll use these values in our
    # optimization below.
    pipe = Pipeline(
        columns={
            'alpha': combined_alpha,
            'sector': Sector(),
            'sentiment': sentiment_zscore,
            'beta': beta,
        },
        # combined_alpha will be NaN for all stocks not in our universe,
        # but we also want to make sure that we have a sector code for everything
        # we trade.
        screen=combined_alpha.notnull() & Sector().notnull() & beta.notnull(),
    )

    # Multiple pipelines can be used in a single algorithm.
    algo.attach_pipeline(pipe, 'pipe')
    algo.attach_pipeline(risk_loading_pipeline(), 'risk_loading_pipeline')

    # Schedule a function, 'do_portfolio_construction', to run once a week,
    # MINUTES_AFTER_OPEN_TO_TRADE minutes after market open.
    algo.schedule_function(
        do_portfolio_construction,
        date_rule=algo.date_rules.week_start(),
        time_rule=algo.time_rules.market_open(
            minutes=MINUTES_AFTER_OPEN_TO_TRADE),
        half_days=False,
    )
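do_portfolio_construction is scheduled above but not shown in this snippet. A minimal sketch of what it might look like with the Optimize API; the constraint bounds are assumptions, not taken from the original.

import quantopian.algorithm as algo
import quantopian.optimize as opt


def do_portfolio_construction(context, data):
    pipeline_data = algo.pipeline_output('pipe')

    # Maximize the combined alpha subject to simple risk limits.
    objective = opt.MaximizeAlpha(pipeline_data.alpha)
    constraints = [
        opt.MaxGrossExposure(1.0),  # assumed leverage cap
        opt.DollarNeutral(),
        opt.PositionConcentration.with_equal_bounds(min=-0.01, max=0.01),  # assumed bounds
    ]
    algo.order_optimal_portfolio(objective, constraints)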
Example #14
def initialize(context):
    set_slippage(
        slippage.VolumeShareSlippage(volume_limit=0.025,
                                     price_impact=0.1))  # Default
    set_commission(commission.PerShare(cost=0.005,
                                       min_trade_cost=1.0))  # FSC for IB
    #Fetch SP500 DNN predictions
    fetch_csv(
        'https://docs.google.com/spreadsheets/d/e/2PACX-1vQAtxWEdzLnzdmU6zfTxk7RYgOIieg27z9em6sSd9Mm2mBpK46CXOY7EFrYjnZ3Vy9L8vYxBSICyEFz/pub?output=csv',
        date_column='Date',
        date_format='%m/%d/%y')
    context.stock = symbol('SPY')

    # Universe Selection
    # ------------------
    base_universe = QTradableStocksUS()

    # From what remains, each week, take the top UNIVERSE_SIZE stocks by average dollar
    # volume traded.
    weekly_top_volume = (AverageDollarVolume(
        window_length=LIQUIDITY_LOOKBACK_LENGTH).top(
            UNIVERSE_SIZE, mask=base_universe).downsample('week_start'))
    # The final universe is the weekly top volume &-ed with the original base universe.
    # &-ing these is necessary because the top volume universe is calculated at the start
    # of each week, and an asset might fall out of the base universe during that week.
    universe = weekly_top_volume & base_universe

    # Market Beta Factor
    # ------------------
    stock_beta = MyBeta()
    # Alternatively, a built-in beta factor could be used, e.g.:
    # stock_beta = SimpleBeta(target=sid(8554), regression_length=21)
    # Alpha Generation
    # ----------------
    # Compute Z-scores of free cash flow yield and earnings yield.
    # Both of these are fundamental value measures.
    #fcf_zscore = Fundamentals.fcf_yield.latest.zscore(mask=universe)
    #yield_zscore = Fundamentals.earning_yield.latest.zscore(mask=universe)
    #sentiment_zscore = psychsignal.stocktwits.bull_minus_bear.latest.zscore(mask=universe)
    b = stock_beta.rank(mask=universe)
    # Alpha Combination
    # -----------------
    # Assign every asset a combined rank and center the values at 0.
    # For UNIVERSE_SIZE=500, the range of values should be roughly -250 to 250.
    #combined_alpha = (fcf_zscore + yield_zscore + sentiment_zscore).rank().demean()
    alpha = b.top(100)

    # Schedule Tasks
    # --------------
    # Create and register a pipeline computing our alpha filter for every stock
    # in our universe. We'll use these values in our portfolio construction below.
    pipe = Pipeline(
        columns={
            'alpha': alpha,
        },
        # "alpha" is a Filter (the top 100 stocks by beta rank), so using it as
        # the screen restricts the pipeline output to exactly those stocks.
        screen=alpha,
    )
    algo.attach_pipeline(pipe, 'pipe')

    # Schedule a function, 'do_portfolio_construction', to run every day,
    # MINUTES_AFTER_OPEN_TO_TRADE minutes after market open.
    algo.schedule_function(
        do_portfolio_construction,
        date_rule=algo.date_rules.every_day(),
        time_rule=algo.time_rules.market_open(
            minutes=MINUTES_AFTER_OPEN_TO_TRADE),
        half_days=False,
    )

    schedule_function(func=record_vars,
                      date_rule=date_rules.every_day(),
                      time_rule=time_rules.market_close(),
                      half_days=True)

    set_slippage(
        slippage.VolumeShareSlippage(volume_limit=0.025,
                                     price_impact=0.1))  # Default
    set_commission(commission.PerShare(cost=0.005,
                                       min_trade_cost=1.0))  # FSC for IB
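record_vars is scheduled above but not included in the snippet; a hypothetical stub consistent with its usual role (plotting a few portfolio statistics) might be:

def record_vars(context, data):
    # Plot leverage and position count each day for sanity checking.
    record(leverage=context.account.leverage,
           num_positions=len(context.portfolio.positions))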
Example #15
def initialize(context):

    context.s1 = sid(5061)
    context.s2 = sid(8655)
    context.s3 = sid(17787)
    context.s4 = sid(4707)
    context.s5 = sid(35920)
    context.s6 = sid(32146)
    context.s7 = sid(4283)
    context.s8 = sid(5885)
    context.s9 = sid(12002)
    context.s10 = sid(15596)
    context.s11 = sid(679)
    context.s12 = sid(1937)
    context.s13 = sid(8347)
    context.s14 = sid(3895)
    context.s15 = sid(14516)
    context.s16 = sid(14517)
    context.s17 = sid(21787)
    context.s18 = sid(21520)
    context.s19 = sid(21524)
    context.s20 = sid(23216)
    context.s21 = sid(19725)
    context.s22 = sid(3766)
    context.s23 = sid(49506)
    context.s24 = sid(5692)
    context.s25 = sid(24757)
    context.s26 = sid(7904)
    context.s27 = sid(20774)
    context.s28 = sid(7493)
    context.s29 = sid(438)
    context.s30 = sid(4914)

    # Each pair carries its own trading state: position flags plus running
    # histories of the spread, hedge ratio, residuals, and z-scores.
    def new_pair_state():
        return {
            'in_short': False,
            'in_long': False,
            'spread': np.array([]),
            'hedge_history': np.array([]),
            'residual': np.array([]),
            'z_scores': np.array([])
        }

    pairs = [(context.s1, context.s2), (context.s3, context.s4),
             (context.s5, context.s6), (context.s7, context.s8),
             (context.s9, context.s10), (context.s11, context.s12),
             (context.s13, context.s14), (context.s15, context.s16),
             (context.s17, context.s18), (context.s19, context.s20),
             (context.s21, context.s22), (context.s23, context.s24),
             (context.s25, context.s26), (context.s27, context.s28),
             (context.s29, context.s30)]

    context.asset_pairs = [[s_a, s_b, new_pair_state()] for s_a, s_b in pairs]

    context.z_back = 20
    context.hedge_lag = 2
    context.z_entry = 2.0
    context.z_exit = 0.5
    context.pair_count = len(context.asset_pairs)
    context.margin = 0.5
    context.universe = QTradableStocksUS()

    #attach_pipeline(pipeline_data(context), 'fund_data')

    schedule_function(my_handle_data, date_rules.every_day(),
                      time_rules.market_close(hours=4))
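The parameters z_back, z_entry, and z_exit above are consumed by my_handle_data, which is not shown. A minimal sketch, under the assumption that 'spread' holds a pair's accumulated spread history, of how the z-score might be computed and applied:

import numpy as np


def latest_zscore(spread, z_back):
    # Z-score of the most recent spread value against its trailing window.
    window = spread[-z_back:]
    return (window[-1] - np.mean(window)) / np.std(window)

# Under these assumptions, a pair trade would be opened when
# abs(z) > context.z_entry and closed when abs(z) < context.z_exit.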
Example #16
def initialize(context):

    schedule_function(
        func=periodic_rebalance,
        date_rule=date_rules.every_day(),  #week_start(days_offset=1),
        time_rule=time_rules.market_open(hours=.5))

    schedule_function(my_rebalance, date_rules.every_day(),
                      time_rules.market_open(hours=0, minutes=1))
    #schedule_function(my_rebalance, date_rules.every_day(), time_rules.market_open(hours=1, minutes=1))
    schedule_function(my_rebalance, date_rules.every_day(),
                      time_rules.market_open(hours=2, minutes=1))
    #schedule_function(my_rebalance, date_rules.every_day(), time_rules.market_open(hours=3, minutes=1))
    schedule_function(my_rebalance, date_rules.every_day(),
                      time_rules.market_open(hours=4, minutes=1))
    #schedule_function(my_rebalance, date_rules.every_day(), time_rules.market_open(hours=5, minutes=1))
    schedule_function(my_rebalance, date_rules.every_day(),
                      time_rules.market_open(hours=6, minutes=1))

    #schedule_function(
    #    do_portfolio_construction,
    #    date_rule=algo.date_rules.week_start(),
    #    time_rule=algo.time_rules.market_open(minutes=30),
    #    half_days=False,
    #    )

    for hours_offset in range(7):
        schedule_function(my_rebalance,
                          date_rules.every_day(),
                          time_rules.market_open(hours=hours_offset,
                                                 minutes=10),
                          half_days=True)


    #
    # Set portfolio parameters
    #
    set_asset_restrictions(security_lists.restrict_leveraged_etfs)
    context.acc_leverage = 1.00
    context.min_holdings = 20
    context.s_min_holdings = 10
    #
    # set profit taking and stop loss parameters
    #
    context.profit_taking_factor = 0.01
    context.profit_taking_target = 100.0  #set much larger than 1.0 to disable
    context.profit_target = {}
    context.profit_taken = {}
    context.stop_pct = 0.97  # set to 0.0 to disable
    context.stop_price = defaultdict(lambda: 0)
    #
    # Set commission model to be used
    #
    set_slippage(
        slippage.VolumeShareSlippage(volume_limit=0.025,
                                     price_impact=0.1))  # Default
    set_commission(commission.PerShare(cost=0.005,
                                       min_trade_cost=1.0))  # FSC for IB

    # Define safe set (of bonds)
    #sid(32268) #SH
    context.safe = [
        sid(23870),  #IEF
        sid(23921),  #TLT
        #sid(8554), #SPY
    ]
    #
    # Define proxy to be used as proxy for overall stock behavior
    # set default position to be in safe set (context.buy_stocks = False)
    #
    context.canary = sid(22739)  #why not spy
    context.buy_stocks = False
    #
    # Establish pipeline
    #
    pipe = Pipeline()
    attach_pipeline(pipe, 'ranked_stocks')

    #
    # Define the four momentum factors used in ranking stocks
    #
    factor1 = simple_momentum(window_length=1)
    pipe.add(factor1, 'factor_1')
    factor2 = simple_momentum(window_length=60) / Volatility(window_length=60)
    pipe.add(factor2, 'factor_2')
    factor3 = simple_momentum(window_length=252)
    pipe.add(factor3, 'factor_3')
    factor4 = ((Momentum() / Volatility()) + Momentum())  #or Downside_Risk()
    pipe.add(factor4, 'factor_4')
    factor8 = earning_yield()
    pipe.add(factor8, 'factor8')
    factor9 = roe() + roic() + roa()
    pipe.add(factor9, 'factor9')
    factor10 = cash_return()
    pipe.add(factor10, 'factor10')
    factor11 = fcf_yield()
    pipe.add(factor11, 'factor11')
    factor12 = current_ratio()
    pipe.add(factor12, 'factor12')
    factor13 = Quality()
    pipe.add(factor13, 'factor13')
    factor14 = market_cap()
    pipe.add(factor14, 'factor14')
    factor15 = RnD_to_market() + capex()
    pipe.add(factor15, 'factor15')
    factor18 = EPS_Growth_3M()
    pipe.add(factor18, 'factor18')
    factor19 = Piotroski4()
    pipe.add(factor19, 'factor19')
    factor20 = capex()
    pipe.add(factor20, 'factor20')
    #
    # Define other factors that may be used in stock screening
    #
    #factor5 = get_fcf_per_share()
    #pipe.add(factor5, 'factor_5')
    factor6 = AverageDollarVolume(window_length=60)
    pipe.add(factor6, 'factor_6')
    factor7 = get_last_close()
    pipe.add(factor7, 'factor_7')

    #factor_4_filter = factor4 > 1.03   # only consider stocks with positive 1y growth
    #factor_5_filter = factor5 > 0.0   # only  consider stocks with positive FCF
    factor_6_filter = factor6 > .5e6  # only consider stocks trading >$500k per day
    #factor_7_filter = factor7 > 3.00  # only consider stocks that close above this value
    factor_12_filter = factor12 > .99
    #factor_8_filter = factor8 > 0
    #factor_15_filter = factor15 > factor6
    #factor_1_filter = factor1 > 1.1
    #factor_2_filter = factor2 > 1
    #factor_20_filter = factor20 > 0
    utilities_filter = Sector() != 207
    materials_filter = Sector() != 101
    energy_filter = Sector() != 309
    industrial_filter = Sector() != 310
    health_filter = Sector() != 206
    staples_filter = Sector() != 205
    real_estate_filter = Sector() != 104
    #sentiment_filter = ((0.5*st.bull_scored_messages.latest)>(st.bear_scored_messages.latest)) & (st.bear_scored_messages.latest > 10)
    consumer_cyclical_filter = Sector() != 102
    financial_filter = Sector() != 103
    communication_filter = Sector() != 308
    technology_filter = Sector() != 311

    #Basic_Materials = context.output[context.output.sector == 101]
    #Consumer_Cyclical = context.output[context.output.sector == 102]
    #Financial_Services = context.output[context.output.sector == 103]
    #Real_Estate = context.output[context.output.sector == 104]
    #Consumer_Defensive = context.output[context.output.sector == 205]
    #Healthcare = context.output[context.output.sector == 206]
    #Utilities = context.output[context.output.sector == 207]
    #Communication_Services = context.output[context.output.sector == 308]
    #Energy = context.output[context.output.sector == 309]
    #Industrials = context.output[context.output.sector == 310]
    #Technology = context.output[context.output.sector == 311]
    #
    # Establish screen used to establish candidate stock list
    #
    mkt_screen = market_cap()
    cash_flow = factor10 + factor11
    price = factor14
    profitability = factor9
    #earning_quality = factor15
    stocks = QTradableStocksUS()
    # Alternative screens considered: mkt_screen.top(3500) & profitability.top(3500)
    # & factor19.top(2000) (& factor8.top(2000) & price.top(2000) & factor15.top(3000))
    total_filter = (
        stocks
        & factor_6_filter
        #& factor_15_filter
        #& factor_8_filter
        #& factor_9_filter
        #& factor_1_filter
        #& factor_20_filter
        #& communication_filter
        #& consumer_cyclical_filter
        #& financial_filter
        #& staples_filter
        #& materials_filter
        #& industrial_filter
        #& factor_12_filter
        #& technology_filter
    )

    pipe.set_screen(total_filter)
    #
    # Establish ranked stock list
    #
    factor1_rank = factor1.rank(mask=total_filter, ascending=False)
    pipe.add(factor1_rank, 'f1_rank')
    factor2_rank = factor2.rank(mask=total_filter, ascending=False)
    pipe.add(factor2_rank, 'f2_rank')
    factor3_rank = factor3.rank(mask=total_filter,
                                ascending=False)  #significant effect
    pipe.add(factor3_rank, 'f3_rank')
    factor4_rank = factor4.rank(mask=total_filter,
                                ascending=False)  #significant effect
    pipe.add(factor4_rank, 'f4_rank')
    factor8_rank = factor8.rank(mask=total_filter,
                                ascending=False)  #significant effect
    pipe.add(factor8_rank, 'f8_rank')
    factor9_rank = factor9.rank(mask=total_filter,
                                ascending=False)  #very big effect
    pipe.add(factor9_rank, 'f9_rank')
    factor10_rank = factor10.rank(mask=total_filter, ascending=False)
    pipe.add(factor10_rank, 'f10_rank')
    factor11_rank = factor11.rank(mask=total_filter, ascending=False)
    pipe.add(factor11_rank, 'f11_rank')
    factor13_rank = factor13.rank(mask=total_filter,
                                  ascending=False)  #may want to remove
    pipe.add(factor13_rank, 'f13_rank')
    factor14_rank = factor14.rank(mask=total_filter, ascending=True)
    pipe.add(factor14_rank, 'f14_rank')
    factor15_rank = factor15.rank(mask=total_filter, ascending=False)
    pipe.add(factor15_rank, 'f15_rank')
    factor18_rank = factor18.rank(mask=total_filter, ascending=False)
    pipe.add(factor18_rank, 'f18_rank')
    factor19_rank = factor19.rank(mask=total_filter, ascending=False)
    pipe.add(factor19_rank, 'f19_rank')
    factor20_rank = factor20.rank(mask=total_filter, ascending=False)
    pipe.add(factor20_rank, 'f20_rank')

    combo_raw = (factor8_rank + factor18_rank + factor1_rank + factor4_rank +
                 factor10_rank + factor11_rank + factor15_rank + factor9_rank +
                 factor19_rank)  #+factor14_rank*.5)
    pipe.add(combo_raw, 'combo_raw')
    pipe.add(combo_raw.rank(mask=total_filter), 'combo_rank')