def test_comap_maplike():
    """``comap`` over plain iterables behaves exactly like builtin ``map``."""
    increment = op.add(1)

    # single-iterable form: comap is a map subclass and yields map's results
    single = comap(increment, (1, 2, 3))
    assert isinstance(single, map)
    expected = tuple(map(op.add(1), (1, 2, 3)))
    assert tuple(single) == expected == (2, 3, 4)

    # two-iterable form: elements are combined pairwise
    paired = comap(op.add, (1, 2, 3), (1, 2, 3))
    assert tuple(paired) == (2, 4, 6)
def make_indented_body(body_str):
    """Dedent ``body_str`` and re-indent every line by one level.

    Helper for generating an indented string to use as the body of a
    function.

    Parameters
    ----------
    body_str : str
        The raw (possibly uniformly over-indented) body text.

    Returns
    -------
    str
        ``body_str`` with common leading whitespace removed and each line
        prefixed with four spaces.
    """
    # A plain generator expression replaces the curried-``add``/``map``
    # pipeline: it reads top to bottom and needs nothing beyond the
    # standard library's ``dedent``.
    return '\n'.join(
        '    ' + line
        for line in dedent(body_str).splitlines()
    )
def test_comap_send():
    """``send`` on a comap forwards the value into each wrapped coroutine."""
    # one coroutine: sent value comes back incremented by one
    single = comap(op.add(1), co())
    assert next(single) == 2
    assert single.send(2) == 3
    assert single.send(3) == 4

    # two coroutines: the same sent value is added to itself
    double = comap(op.add, co(), co())
    assert next(double) == 2
    assert double.send(2) == 4
    assert double.send(3) == 6
def test_updates_lnotab():
    """A transformer that rebuilds every instruction with ``.steal`` must
    change the lnotab's instructions while preserving its line-number keys.
    """
    @instance
    class c(CodeTransformer):
        # match every instruction and replace it with a stolen copy so the
        # lnotab entries point at new (but equivalent) instruction objects
        @pattern(Ellipsis)
        def _(self, instr):
            yield type(instr)(instr.arg).steal(instr)

    def f():  # pragma: no cover
        # this function has irregular whitespace for testing the lnotab
        a = 1
        # intentional line
        b = 2
        # intentional line
        c = 3
        # intentional line
        return a, b, c

    original = Code.from_pyfunc(f)
    post_transform = c.transform(original)

    # check that something happened
    assert original.lnotab != post_transform.lnotab

    # check that we preserved the line numbers
    # (statements in ``f`` sit at offsets 2, 4, 6, 8 from its ``def`` line)
    assert (original.lnotab.keys() ==
            post_transform.lnotab.keys() ==
            set(map(op.add(original.firstlineno), (2, 4, 6, 8))))

    def sorted_instrs(lnotab):
        # yield the lnotab's instructions in bytecode-offset order
        order = sorted(lnotab.keys())
        for idx in order:
            yield lnotab[idx]

    # check that the instrs are correct
    assert all(map(
        Instruction.equiv,
        sorted_instrs(original.lnotab),
        sorted_instrs(post_transform.lnotab),
    ))

    # sanity check that the function is correct
    assert f() == c(f)()
def handle_exception(exc, *,
                     _Done=Done,
                     _type=type,
                     _getframe=sys._getframe,
                     _isinstance=isinstance,
                     _Word=Word,
                     _clear_cstack=clear_cstack):
    """Handle exceptions that are raised during phorth operations.

    Parameters
    ----------
    exc : Exception
        The exception that was raised.

    Notes
    -----
    This normally just prints the exception and restarts jumps us to the
    start of the repl with a clean stack. If ``exc`` is an instance of
    ``Done``, this will reraise the exception and kill the phorth session.

    The keyword-only arguments freeze their globals at definition time so
    lookups inside this handler stay local and fast.
    """
    if _isinstance(exc, _Done):
        # reraise the sentinel `Done` type
        # NOTE(review): raises the global ``Done``, not the ``_Done``
        # default bound above, and a fresh instance rather than ``exc``
        # itself — presumably equivalent for a sentinel; confirm.
        raise Done()

    # frame of the caller (the phorth interpreter loop); depth 1 is load-
    # bearing, so this function must be called directly from that frame
    f = _getframe(1)
    # drain the call stack so the repl restarts with a clean state
    cstack = _clear_cstack(f)
    print(
        'traceback, most recent call last:\n %s\n%s: %s' % (
            # return addresses (stack entries + 1), oldest last, then the
            # instruction index where the exception fired
            '\n '.join(map(
                str,
                concatv(
                    map(op.add(1), reversed(cstack)),
                    (exc.__traceback__.tb_lasti,),
                ),
            )),
            _type(exc).__name__,
            exc,
        ),
    )
def handle_exception(exc, *,
                     _Done=Done,
                     _type=type,
                     _getframe=sys._getframe,
                     _isinstance=isinstance,
                     _Word=Word,
                     _clear_cstack=clear_cstack):
    """Handle exceptions that are raised during phorth operations.

    Parameters
    ----------
    exc : Exception
        The exception that was raised.

    Notes
    -----
    This normally just prints the exception and restarts jumps us to the
    start of the repl with a clean stack. If ``exc`` is an instance of
    ``Done``, this will reraise the exception and kill the phorth session.
    """
    if _isinstance(exc, _Done):
        # the sentinel `Done` type escapes the handler and ends the session
        raise Done()

    caller_frame = _getframe(1)
    call_stack = _clear_cstack(caller_frame)

    # return addresses oldest-last, followed by the faulting instruction
    return_sites = concatv(
        map(op.add(1), reversed(call_stack)),
        (exc.__traceback__.tb_lasti,),
    )
    rendered_sites = '\n '.join(map(str, return_sites))
    message = 'traceback, most recent call last:\n %s\n%s: %s' % (
        rendered_sites,
        _type(exc).__name__,
        exc,
    )
    print(message)
def _expected_data(self, asset_finder):
    """Build the expected pricing arrays and adjustment mappings for the
    quandl sample data.

    Parameters
    ----------
    asset_finder : AssetFinder
        Finder used to resolve ``self.symbols`` into sids.

    Returns
    -------
    pricing : list[np.ndarray]
        One unstacked value array per column in ``self.columns`` (volume
        NaNs replaced with zeros, matching what is written to disk).
    adjustments : list[dict[int, list[Float64Multiply]]]
        Per-column adjustment mappings: dividend and split multipliers for
        the ohlc columns, the inverse split multiplier for volume.
    """
    sids = {
        symbol: asset_finder.lookup_symbol(
            symbol,
            self.asset_start,
        ).sid
        for symbol in self.symbols
    }

    def per_symbol(symbol):
        # load the raw quandl CSV fixture and normalize the column names
        # to the internal lowercase schema
        df = pd.read_csv(
            test_resource_path('quandl_samples', symbol + '.csv.gz'),
            parse_dates=['Date'],
            index_col='Date',
            usecols=[
                'Open',
                'High',
                'Low',
                'Close',
                'Volume',
                'Date',
                'Ex-Dividend',
                'Split Ratio',
            ],
            na_values=['NA'],
        ).rename(columns={
            'Open': 'open',
            'High': 'high',
            'Low': 'low',
            'Close': 'close',
            'Volume': 'volume',
            'Date': 'date',
            'Ex-Dividend': 'ex_dividend',
            'Split Ratio': 'split_ratio',
        })
        df['sid'] = sids[symbol]
        return df

    all_ = pd.concat(map(per_symbol, self.symbols)).set_index(
        'sid',
        append=True,
    ).unstack()

    # one array per column; volume NaNs are written to disk as zeros
    pricing = [
        np.nan_to_num(all_[column].values)
        if column == 'volume' else
        all_[column].values
        for column in self.columns
    ]

    # the first index our written data will appear in the files on disk
    start_idx = (
        self.calendar.all_sessions.get_loc(self.asset_start, 'ffill') + 1
    )

    def i(raw_idx):
        # convert an index into the raw dataframe into an index into the
        # final data
        return start_idx + raw_idx

    def expected_dividend_adjustment(idx, symbol):
        sid = sids[symbol]
        # ``.ix`` was removed from pandas; integer positions use ``.iloc``
        return (
            1 -
            all_.iloc[idx]['ex_dividend', sid] /
            all_.iloc[idx - 1]['close', sid]
        )

    def multiplier(idx, symbol, value):
        # a single adjustment scaling all rows up to ``idx`` for ``symbol``
        sid = sids[symbol]
        return [Float64Multiply(
            first_row=0,
            last_row=i(idx),
            first_col=sid,
            last_col=sid,
            value=value,
        )]

    # dividends for AAPL and MSFT, then the 7:1 AAPL split
    ohlc = {
        i(idx): multiplier(
            idx, symbol, expected_dividend_adjustment(idx, symbol),
        )
        for idx, symbol in (
            (24, 'AAPL'),
            (87, 'AAPL'),
            (150, 'AAPL'),
            (214, 'AAPL'),
            (31, 'MSFT'),
            (90, 'MSFT'),
            (222, 'MSFT'),
        )
    }
    ohlc[i(108)] = multiplier(108, 'AAPL', 1.0 / 7.0)

    adjustments = [ohlc] * (len(self.columns) - 1) + [
        # volume scales inversely to price around the split
        {i(108): multiplier(108, 'AAPL', 7.0)},
    ]
    return pricing, adjustments
def _expected_data(self, asset_finder):
    """Build the expected pricing arrays and adjustment mappings for the
    quandl archive sample data.

    Parameters
    ----------
    asset_finder : AssetFinder
        Finder used to resolve ``self.symbols`` into sids.

    Returns
    -------
    pricing : list[np.ndarray]
        One unstacked value array per column in ``self.columns`` (volume
        NaNs replaced with zeros, matching what is written to disk).
    adjustments : list[dict[int, list[Float64Multiply]]]
        Per-column adjustment mappings: dividend and split multipliers for
        the ohlc columns, the inverse split multiplier for volume.
    """
    sids = {
        symbol: asset_finder.lookup_symbol(
            symbol,
            None,
        ).sid
        for symbol in self.symbols
    }

    # Load raw data from quandl test resources.
    data = load_data_table(
        file=test_resource_path(
            'quandl_samples',
            'QUANDL_ARCHIVE.zip'
        ),
        index_col='date'
    )
    # sids are assigned in order of first appearance of each symbol
    data['sid'] = pd.factorize(data.symbol)[0]

    all_ = data.set_index(
        'sid',
        append=True,
    ).unstack()

    # one array per column; volume NaNs are written to disk as zeros
    pricing = [
        np.nan_to_num(all_[column].values)
        if column == 'volume' else
        all_[column].values
        for column in self.columns
    ]

    # the first index our written data will appear in the files on disk
    start_idx = (
        self.calendar.all_sessions.get_loc(self.start_date, 'ffill') + 1
    )

    def i(raw_idx):
        # convert an index into the raw dataframe into an index into the
        # final data
        return start_idx + raw_idx

    def expected_dividend_adjustment(idx, symbol):
        sid = sids[symbol]
        # ``.ix`` was removed from pandas; integer positions use ``.iloc``
        return (
            1 -
            all_.iloc[idx]['ex_dividend', sid] /
            all_.iloc[idx - 1]['close', sid]
        )

    def multiplier(idx, symbol, value):
        # a single adjustment scaling all rows up to ``idx`` for ``symbol``
        sid = sids[symbol]
        return [Float64Multiply(
            first_row=0,
            last_row=i(idx),
            first_col=sid,
            last_col=sid,
            value=value,
        )]

    # dividends for AAPL and MSFT, then the 7:1 AAPL split
    ohlc = {
        i(idx): multiplier(
            idx, symbol, expected_dividend_adjustment(idx, symbol),
        )
        for idx, symbol in (
            (24, 'AAPL'),
            (87, 'AAPL'),
            (150, 'AAPL'),
            (214, 'AAPL'),
            (31, 'MSFT'),
            (90, 'MSFT'),
            (158, 'MSFT'),
            (222, 'MSFT'),
        )
    }
    ohlc[i(108)] = multiplier(108, 'AAPL', 1.0 / 7.0)

    adjustments = [ohlc] * (len(self.columns) - 1) + [
        # volume scales inversely to price around the split
        {i(108): multiplier(108, 'AAPL', 7.0)},
    ]
    return pricing, adjustments
def _expected_data(self, asset_finder):
    """Build the expected pricing arrays and adjustment mappings for the
    quandl sample data.

    Parameters
    ----------
    asset_finder : AssetFinder
        Finder used to resolve ``self.symbols`` into sids.

    Returns
    -------
    pricing : list[np.ndarray]
        One unstacked value array per column in ``self.columns`` (volume
        NaNs replaced with zeros, matching what is written to disk).
    adjustments : list[dict[int, list[Float64Multiply]]]
        Per-column adjustment mappings: dividend and split multipliers for
        the ohlc columns, the inverse split multiplier for volume.
    """
    sids = {
        symbol: asset_finder.lookup_symbol(
            symbol,
            self.asset_start,
        ).sid
        for symbol in self.symbols
    }

    def per_symbol(symbol):
        # load the raw quandl CSV fixture and normalize the column names
        # to the internal lowercase schema
        df = pd.read_csv(
            test_resource_path('quandl_samples', symbol + '.csv.gz'),
            parse_dates=['Date'],
            index_col='Date',
            usecols=[
                'Open',
                'High',
                'Low',
                'Close',
                'Volume',
                'Date',
                'Ex-Dividend',
                'Split Ratio',
            ],
            na_values=['NA'],
        ).rename(columns={
            'Open': 'open',
            'High': 'high',
            'Low': 'low',
            'Close': 'close',
            'Volume': 'volume',
            'Date': 'date',
            'Ex-Dividend': 'ex_dividend',
            'Split Ratio': 'split_ratio',
        })
        df['sid'] = sids[symbol]
        return df

    all_ = pd.concat(map(per_symbol, self.symbols)).set_index(
        'sid',
        append=True,
    ).unstack()

    # one array per column; volume NaNs are written to disk as zeros
    pricing = [
        np.nan_to_num(all_[column].values)
        if column == 'volume' else
        all_[column].values
        for column in self.columns
    ]

    # the first index our written data will appear in the files on disk
    start_idx = self.calendar.get_loc(self.asset_start, 'ffill') + 1

    def i(raw_idx):
        # convert an index into the raw dataframe into an index into the
        # final data
        return start_idx + raw_idx

    def expected_dividend_adjustment(idx, symbol):
        sid = sids[symbol]
        # ``.ix`` was removed from pandas; integer positions use ``.iloc``
        return (
            1 -
            all_.iloc[idx]['ex_dividend', sid] /
            all_.iloc[idx - 1]['close', sid]
        )

    def multiplier(idx, symbol, value):
        # a single adjustment scaling all rows up to ``idx`` for ``symbol``
        sid = sids[symbol]
        return [Float64Multiply(
            first_row=0,
            last_row=i(idx),
            first_col=sid,
            last_col=sid,
            value=value,
        )]

    # dividends for AAPL and MSFT, then the 7:1 AAPL split
    ohlc = {
        i(idx): multiplier(
            idx, symbol, expected_dividend_adjustment(idx, symbol),
        )
        for idx, symbol in (
            (24, 'AAPL'),
            (87, 'AAPL'),
            (150, 'AAPL'),
            (214, 'AAPL'),
            (31, 'MSFT'),
            (90, 'MSFT'),
            (222, 'MSFT'),
        )
    }
    ohlc[i(108)] = multiplier(108, 'AAPL', 1.0 / 7.0)

    adjustments = [ohlc] * (len(self.columns) - 1) + [
        # volume scales inversely to price around the split
        {i(108): multiplier(108, 'AAPL', 7.0)},
    ]
    return pricing, adjustments
def _expected_data(self, asset_finder):
    """Compute the pricing arrays and adjustment mappings that the quandl
    archive ingest is expected to produce.

    Parameters
    ----------
    asset_finder : AssetFinder
        Finder used to resolve ``self.symbols`` into sids.

    Returns
    -------
    pricing : list[np.ndarray]
        One unstacked value array per column in ``self.columns``.
    adjustments : list[dict[int, list[Float64Multiply]]]
        Per-column adjustment mappings.
    """
    sids = {
        symbol: asset_finder.lookup_symbol(
            symbol,
            None,
        ).sid
        for symbol in self.symbols
    }

    # Load raw data from quandl test resources.
    data = load_data_table(
        file=test_resource_path("quandl_samples", "QUANDL_ARCHIVE.zip"),
        index_col="date",
    )
    data["sid"] = pd.factorize(data.symbol)[0]

    all_ = data.set_index(
        "sid",
        append=True,
    ).unstack()

    # one array per column; volume NaNs become zeros on disk
    pricing = [
        np.nan_to_num(all_[column].values)
        if column == "volume" else
        all_[column].values
        for column in self.columns
    ]

    # the first index our written data will appear in the files on disk
    start_idx = self.calendar.all_sessions.get_loc(self.start_date, "ffill") + 1

    def i(raw_idx):
        # convert an index into the raw dataframe into an index into the
        # final data
        return start_idx + raw_idx

    def expected_dividend_adjustment(idx, symbol):
        sid = sids[symbol]
        return (
            1 -
            all_.iloc[idx]["ex_dividend", sid] /
            all_.iloc[idx - 1]["close", sid]
        )

    def multiplier(idx, symbol, value):
        # one adjustment covering every row up to ``idx`` for ``symbol``
        sid = sids[symbol]
        return [
            Float64Multiply(
                first_row=0,
                last_row=i(idx),
                first_col=sid,
                last_col=sid,
                value=value,
            )
        ]

    # dividends for AAPL and MSFT, then the 7:1 AAPL split
    ohlc = {
        i(idx): multiplier(
            idx, symbol, expected_dividend_adjustment(idx, symbol),
        )
        for idx, symbol in (
            (24, "AAPL"),
            (87, "AAPL"),
            (150, "AAPL"),
            (214, "AAPL"),
            (31, "MSFT"),
            (90, "MSFT"),
            (158, "MSFT"),
            (222, "MSFT"),
        )
    }
    ohlc[i(108)] = multiplier(108, "AAPL", 1.0 / 7.0)

    adjustments = [ohlc] * (len(self.columns) - 1) + [
        # volume scales inversely to price around the split
        {i(108): multiplier(108, "AAPL", 7.0)},
    ]
    return pricing, adjustments
def _expected_data(self, asset_finder):
    """Build the expected pricing arrays and adjustment mappings from the
    local raw-data fixture.

    Parameters
    ----------
    asset_finder : AssetFinder
        Finder used to resolve ``self.symbols`` into sids.

    Returns
    -------
    pricing : list[np.ndarray]
        One unstacked value array per column in ``self.columns`` (volume
        NaNs replaced with zeros, matching what is written to disk).
    adjustments : list[dict[int, list[Float64Multiply]]]
        Per-column adjustment mappings: dividend and split multipliers for
        the ohlc columns, the inverse split multiplier for volume.
    """
    sids = {
        symbol: asset_finder.lookup_symbol(
            symbol,
            None,
        ).sid
        for symbol in self.symbols
    }

    # Load raw data local db.
    data = _raw_data(self.symbols, self.start_date, self.end_date,
                     self.columns)

    all_ = data.set_index(
        'sid',
        append=True,
    ).unstack()

    # one array per column; volume NaNs are written to disk as zeros
    pricing = [
        np.nan_to_num(all_[column].values)
        if column == 'volume' else
        all_[column].values
        for column in self.columns
    ]

    # the first index our written data will appear in the files on disk
    start_idx = (
        self.calendar.all_sessions.get_loc(self.start_date, 'ffill') + 1)

    def i(raw_idx):
        # convert an index into the raw dataframe into an index into the
        # final data
        return start_idx + raw_idx

    def expected_dividend_adjustment(idx, symbol):
        sid = sids[symbol]
        # ``.ix`` was removed from pandas; integer positions use ``.iloc``
        return (
            1 -
            all_.iloc[idx]['ex_dividend', sid] /
            all_.iloc[idx - 1]['close', sid]
        )

    def multiplier(idx, symbol, value):
        # a single adjustment scaling all rows up to ``idx`` for ``symbol``
        sid = sids[symbol]
        return [Float64Multiply(
            first_row=0,
            last_row=i(idx),
            first_col=sid,
            last_col=sid,
            value=value,
        )]

    # dividends for AAPL and MSFT, then the 7:1 AAPL split
    ohlc = {
        i(idx): multiplier(
            idx, symbol, expected_dividend_adjustment(idx, symbol),
        )
        for idx, symbol in (
            (24, 'AAPL'),
            (87, 'AAPL'),
            (150, 'AAPL'),
            (214, 'AAPL'),
            (31, 'MSFT'),
            (90, 'MSFT'),
            (158, 'MSFT'),
            (222, 'MSFT'),
        )
    }
    ohlc[i(108)] = multiplier(108, 'AAPL', 1.0 / 7.0)

    adjustments = [ohlc] * (len(self.columns) - 1) + [
        # volume scales inversely to price around the split
        {i(108): multiplier(108, 'AAPL', 7.0)},
    ]
    return pricing, adjustments