def test_getOffsetName():
    assert_raises(Exception, getOffsetName, BDay(2))
    assert getOffsetName(BDay()) == 'WEEKDAY'
    assert getOffsetName(BMonthEnd()) == 'EOM'
    assert getOffsetName(Week(weekday=0)) == 'W@MON'
    assert getOffsetName(Week(weekday=1)) == 'W@TUE'
    assert getOffsetName(Week(weekday=2)) == 'W@WED'
    assert getOffsetName(Week(weekday=3)) == 'W@THU'
    assert getOffsetName(Week(weekday=4)) == 'W@FRI'
def test_unpickle_legacy_frame(self):
    dtindex = DatetimeIndex(start='1/3/2005', end='1/14/2005', freq=BDay(1))
    unpickled = self.frame
    self.assertEquals(type(unpickled.index), DatetimeIndex)
    self.assertEquals(len(unpickled), 10)
    self.assert_((unpickled.columns == Int64Index(np.arange(5))).all())
    self.assert_((unpickled.index == dtindex).all())
    self.assertEquals(unpickled.index.offset, BDay(1, normalize=True))
def test_unpickle_legacy_series(self):
    from pandas.core.datetools import BDay

    unpickled = self.series
    dtindex = DatetimeIndex(start='1/3/2005', end='1/14/2005', freq=BDay(1))
    self.assertEquals(type(unpickled.index), DatetimeIndex)
    self.assertEquals(len(unpickled), 10)
    self.assert_((unpickled.index == dtindex).all())
    self.assertEquals(unpickled.index.offset, BDay(1, normalize=True))
def test_getitem(self):
    def _check_getitem(sp, dense):
        for idx, val in compat.iteritems(dense):
            tm.assert_almost_equal(val, sp[idx])

        for i in range(len(dense)):
            tm.assert_almost_equal(sp[i], dense[i])
            # j = np.float64(i)
            # assert_almost_equal(sp[j], dense[j])

            # API change 1/6/2012
            # negative getitem works
            # for i in xrange(len(dense)):
            #     assert_almost_equal(sp[-i], dense[-i])

    _check_getitem(self.bseries, self.bseries.to_dense())
    _check_getitem(self.btseries, self.btseries.to_dense())
    _check_getitem(self.zbseries, self.zbseries.to_dense())
    _check_getitem(self.iseries, self.iseries.to_dense())
    _check_getitem(self.ziseries, self.ziseries.to_dense())

    # exception handling
    self.assertRaises(Exception, self.bseries.__getitem__,
                      len(self.bseries) + 1)

    # index not contained
    self.assertRaises(Exception, self.btseries.__getitem__,
                      self.btseries.index[-1] + BDay())
def test_getOffset():
    assert_raises(Exception, getOffset, 'gibberish')
    assert getOffset('WEEKDAY') == BDay()
    assert getOffset('EOM') == BMonthEnd()
    assert getOffset('W@MON') == Week(weekday=0)
    assert getOffset('W@TUE') == Week(weekday=1)
    assert getOffset('W@WED') == Week(weekday=2)
    assert getOffset('W@THU') == Week(weekday=3)
    assert getOffset('W@FRI') == Week(weekday=4)
def cc_returns(ts, **kwargs):
    start = kwargs.get('start', None)
    end = kwargs.get('end', dt.datetime.today())
    delta = kwargs.get('deltaya', BDay())
    period = kwargs.get('period', None)
    rets = returns(ts, type='net', start=start, end=end, delta=delta,
                   period=period)
    return math.log(1 + rets)
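# Worked example (not part of the code above): the continuously compounded
# return that cc_returns produces is just log(1 + net return). A minimal
# sketch with a hypothetical 5% net return:
import math

net_return = 0.05                     # e.g. a price move from 100 to 105
cc_return = math.log(1 + net_return)  # ~0.04879, the log-return equivalent
assert abs(cc_return - 0.0488) < 1e-3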
def test_apply(self):
    tests = []

    tests.append((bday,
                  {datetime(2008, 1, 1): datetime(2008, 1, 2),
                   datetime(2008, 1, 4): datetime(2008, 1, 7),
                   datetime(2008, 1, 5): datetime(2008, 1, 7),
                   datetime(2008, 1, 6): datetime(2008, 1, 7),
                   datetime(2008, 1, 7): datetime(2008, 1, 8)}))

    tests.append((2 * bday,
                  {datetime(2008, 1, 1): datetime(2008, 1, 3),
                   datetime(2008, 1, 4): datetime(2008, 1, 8),
                   datetime(2008, 1, 5): datetime(2008, 1, 8),
                   datetime(2008, 1, 6): datetime(2008, 1, 8),
                   datetime(2008, 1, 7): datetime(2008, 1, 9)}))

    tests.append((-bday,
                  {datetime(2008, 1, 1): datetime(2007, 12, 31),
                   datetime(2008, 1, 4): datetime(2008, 1, 3),
                   datetime(2008, 1, 5): datetime(2008, 1, 4),
                   datetime(2008, 1, 6): datetime(2008, 1, 4),
                   datetime(2008, 1, 7): datetime(2008, 1, 4),
                   datetime(2008, 1, 8): datetime(2008, 1, 7)}))

    tests.append((-2 * bday,
                  {datetime(2008, 1, 1): datetime(2007, 12, 28),
                   datetime(2008, 1, 4): datetime(2008, 1, 2),
                   datetime(2008, 1, 5): datetime(2008, 1, 3),
                   datetime(2008, 1, 6): datetime(2008, 1, 3),
                   datetime(2008, 1, 7): datetime(2008, 1, 3),
                   datetime(2008, 1, 8): datetime(2008, 1, 4),
                   datetime(2008, 1, 9): datetime(2008, 1, 7)}))

    tests.append((BDay(0),
                  {datetime(2008, 1, 1): datetime(2008, 1, 1),
                   datetime(2008, 1, 4): datetime(2008, 1, 4),
                   datetime(2008, 1, 5): datetime(2008, 1, 7),
                   datetime(2008, 1, 6): datetime(2008, 1, 7),
                   datetime(2008, 1, 7): datetime(2008, 1, 7)}))

    for dateOffset, cases in tests:
        for baseDate, expected in cases.iteritems():
            assertEq(dateOffset, baseDate, expected)
def testRollback1(self):
    self.assertEqual(BDay(10).rollback(self.d), self.d)
def test_hasOffsetName():
    assert hasOffsetName(BDay())
    assert not hasOffsetName(BDay(2))
def main():
    # for now im just going to account for the discover balance:
    # just put balance on this one card in the future
    paycheckSpanDays = 14
    paycheckDate = datetime.date(2017, 6, 2)
    rent = 775
    rentDate = datetime.date(2017, 6, 1)
    discretionary = float(200)
    # discoverDate = datetime.date(2017, 6, 22)
    discoverDate = datetime.date(2017, 6, 13)
    chaseDate = datetime.date(2017, 6, 19)
    amexDate = datetime.date(2017, 6, 11)
    liabilities = (discoverDate, chaseDate, amexDate)
    today = datetime.date.today()

    rentDate = rentDate.replace(month=today.month + 1)
    if today == (rentDate - BDay(6)).date():
        Withdraw(rent)
    print('rent Date: {}'.format(rentDate))

    # finds all the due dates
    i = 0
    for liability in liabilities:  # discover, chase, amex  # not tested
        liability = liability.replace(month=today.month)
        print('liability-5bday {}'.format((liability - BDay(6)).date()))
        if today == (liability - BDay(6)).date():
            chase_pymt, amex_pymt, discover_pymt = FindCreditLiabilities()
            # turn this into a class to cut out 10 lines of code
            if i == 0:
                if float(discover_pymt) > 250:
                    Withdraw(discover_pymt)
                elif float(discover_pymt) > 0:
                    Withdraw(250)
                    Deposit(250 - discover_pymt)
            if i == 1:
                if float(chase_pymt) > 250:
                    Withdraw(chase_pymt)
                elif float(chase_pymt) > 0:
                    Withdraw(250)
                    Deposit(250 - chase_pymt)
            if i == 2:
                if float(amex_pymt) > 250:
                    Withdraw(amex_pymt)
                elif float(amex_pymt) > 0:
                    Withdraw(250)
                    Deposit(250 - amex_pymt)
        i = i + 1

    # finds the next pay check date
    while paycheckDate < today:
        NextPayCheck = paycheckDate + datetime.timedelta(days=paycheckSpanDays)
        print('NextPayCheck: {}'.format(NextPayCheck))
        paycheckDate = NextPayCheck  # advance so the loop terminates

    # finds if we will need to make a deposit
    doNotDeposit = False
    for liability in liabilities:  # prevents depositing if you have a liability
        if today == liability - datetime.timedelta(days=1):
            doNotDeposit = True

    if today == paycheckDate and not doNotDeposit:
        checking = FindChecking()
        Deposit(float(checking) - discretionary)

    try:
        driver.close()
    except:
        print("driver not open")
def test_offsets_compare_equal(self):
    # root cause of #456
    offset1 = BDay()
    offset2 = BDay()
    self.assertFalse(offset1 != offset2)
def setUp(self):
    self.d = datetime(2008, 1, 1)

    self.offset = BDay()
    self.offset2 = BDay(2)
def test_onOffset(self):
    tests = [(BDay(), datetime(2008, 1, 1), True),
             (BDay(), datetime(2008, 1, 5), False)]

    for offset, date, expected in tests:
        assertOnOffset(offset, date, expected)
def test_apply_corner(self):
    self.assertRaises(Exception, BDay().apply, BMonthEnd())
def testRollforward1(self):
    self.assertEqual(BDay(10).rollforward(self.d), self.d)
def testRollforward2(self):
    self.assertEqual(BDay(10).rollforward(datetime(2008, 1, 5)),
                     datetime(2008, 1, 7))
def daily_returns(ts, **kwargs):
    relative = kwargs.get('relative', 0)
    return returns(ts, delta=BDay(), relative=relative)
def daily_returns(ts, **kwargs):
    ''' re-compute ts on a daily basis '''
    relative = kwargs.get('relative', 0)
    return returns(ts, delta=BDay(), relative=relative)
def testMult2(self):
    self.assertEqual(self.d + (-5 * BDay(-10)),
                     self.d + BDay(50))
def testMult1(self):
    self.assertEqual(self.d + 10 * self.offset, self.d + BDay(10))
def testSub(self):
    off = self.offset2
    self.assertRaises(Exception, off.__sub__, self.d)
    self.assertEqual(2 * off - off, off)

    self.assertEqual(self.d - self.offset2, self.d + BDay(-2))
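# A standalone sketch of the BDay arithmetic exercised by testMult1, testMult2
# and testSub above, assuming BDay is imported from pandas.tseries.offsets
# (the import path may differ in older pandas versions):
from datetime import datetime
from pandas.tseries.offsets import BDay

d = datetime(2008, 1, 1)                # a Tuesday
assert d + 10 * BDay() == d + BDay(10)  # multiplying by an int scales the offset
assert -5 * BDay(-10) == BDay(50)       # negation and scaling compose
assert d - BDay(2) == d + BDay(-2)      # subtracting an offset steps backwards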
class MDFMagics(Magics):
    """A component to manage the mdf magic functions"""
    __timestep = BDay(1)

    @line_magic
    def mdf_help(self, _=""):
        """Show the mdf ipython help"""
        mdf_pylab_help()

    @line_magic
    def mdf_ctx(self, parameter_s=""):
        """
        Gets or sets the current context.

        %mdf_ctx [new_ctx]
        """
        cur_ctx = _get_current_context()
        if parameter_s:
            ctx = eval(parameter_s, self.shell.user_global_ns, self.shell.user_ns)
            assert isinstance(ctx, MDFContext)
            ctx._activate_ctx()
            cur_ctx = ctx
        return cur_ctx

    @line_magic
    def mdf_now(self, parameter_s=""):
        """
        Gets or sets the date of the current context.

        %mdf_now [date]
        """
        curr_ctx = _get_current_context()
        if parameter_s:
            now = _parse_datetime(parameter_s, self.shell.user_global_ns, self.shell.user_ns)
            root_ctx = curr_ctx.get_parent() or curr_ctx
            root_ctx.set_date(now)
        return curr_ctx.get_date()

    @line_magic
    def mdf_reset(self, parameter_s=""):
        """
        Resets the current mdf context, and optionally sets the current date.

        %mdf_reset [date]

        eg: %mdf_reset
        or: %mdf_reset 2010-01-01
        """
        if parameter_s:
            now = _parse_datetime(parameter_s, self.shell.user_global_ns, self.shell.user_ns)
        else:
            now = datetools.normalize_date(datetime.now())
        ctx = MDFContext(now)
        ctx._activate_ctx()

    @line_magic
    def mdf_timestep(self, parameter_s=""):
        """
        Gets/sets the timestep used to advance the date when calling
        %mdf_advance or %mdf_evalto.

        %mdf_timestep [offset]

        eg: %mdf_timestep
        or: %mdf_timestep WEEKDAY
        """
        if parameter_s:
            self.__timestep = datetools.getOffset(parameter_s)
        return self.__timestep

    @line_magic
    def mdf_evalto(self, parameter_s=""):
        """
        Advances the current context to the end date and returns a pandas
        dataframe of nodes evaluated on each timestep.

        %mdf_evalto <end_date> [nodes...]

        eg: %mdf_evalto 2020-01-01 <my node 1> <my node 2>
        """
        args = tokenize(parameter_s)
        cur_ctx = _get_current_context()
        root_ctx = cur_ctx.get_parent() or cur_ctx

        end_date, nodes = args[0], args[1:]
        end_date = _parse_datetime(end_date, self.shell.user_global_ns, self.shell.user_ns)
        nodes = map(lambda x: eval(x, self.shell.user_global_ns, self.shell.user_ns), nodes)

        df_ctx = root_ctx
        if len(nodes) > 0 and isinstance(nodes[-1], (dict, list, tuple)):
            shift_sets = _get_shift_sets(args[-1], nodes.pop())
            assert len(shift_sets) <= 1, "Only one shift set allowed for %mdf_evalto"
            if shift_sets:
                unused, shift_set = shift_sets[0]
                df_ctx = df_ctx.shift(shift_set=shift_set)

        df_builder = DataFrameBuilder(nodes, filter=True)
        date_range = pd.DateRange(cur_ctx.get_date(), end_date, offset=self.__timestep)
        for dt in date_range:
            root_ctx.set_date(dt)
            df_builder(dt, df_ctx)

        return df_builder.get_dataframe(df_ctx)

    @line_magic
    def mdf_advance(self, parameter_s=""):
        """
        Advance the current context one timestep (see %mdf_timestep).

        %mdf_advance [nodes...]

        If node is specified the value of node after the time has been
        advanced is returned.

        eg: %mdf_advance mdf.now
        """
        args = tokenize(parameter_s)
        nodes = []
        if args:
            nodes = map(lambda x: eval(x, self.shell.user_global_ns, self.shell.user_ns), args)
            for node in nodes:
                assert isinstance(node, MDFNode)

        cur_ctx = _get_current_context()
        root_ctx = cur_ctx.get_parent() or cur_ctx
        root_ctx.set_date(root_ctx.get_date() + self.__timestep)

        if len(nodes) > 0:
            if len(nodes) == 1:
                return cur_ctx[nodes[0]]
            return [cur_ctx[node] for node in nodes]

    @line_magic
    def mdf_show(self, parameter_s=""):
        """
        Opens a new mdf viewer and adds nodes to it, or adds the nodes to an
        existing viewer if one is open.

        %mdf_show [nodes...]
        """
        args = tokenize(parameter_s)
        nodes = map(lambda x: eval(x, self.shell.user_global_ns, self.shell.user_ns), args)
        ctx = _get_current_context()
        viewer.show(nodes, ctx=ctx)

    @line_magic
    def mdf_selected(self, parameter_s=""):
        """
        Return tuples of (ctx, node) for the currently selected nodes in the
        mdf viewer.

        %mdf_selected
        """
        return viewer.get_selected()

    def _magic_dataframe(self, parameter_s, widepanel=False, single_df=True):
        """Implementation for magic_dataframe and magic_widepanel"""
        # the first two arguments are dates, and after that it's a list of nodes
        # with some optional keyword args, ie
        # %mdf_df <start> <end> node, node, node, shifts=[{x:1}, {x:2}]
        args = arg_names = tokenize(parameter_s)
        args = [_try_eval(x, self.shell.user_global_ns, self.shell.user_ns) for x in args]
        args = list(zip(arg_names, args))

        start = None
        if len(args) > 0:
            arg_name, arg = args.pop(0)
            start = _parse_datetime(arg_name, self.shell.user_global_ns, self.shell.user_ns)

        end = None
        if len(args) > 0:
            arg_name, arg = args.pop(0)
            end = _parse_datetime(arg_name, self.shell.user_global_ns, self.shell.user_ns)

        # the final argument can be the number of processes to use
        num_processes = 0
        if len(args) > 0:
            arg_name, arg = args[-1]
            if isinstance(arg, basestring) and arg.startswith("||"):
                arg_name, arg = args.pop()
                num_processes = int(arg[2:])

        # the next to last parameter may be a shift set or list of shift sets.
        has_shifts = False
        shift_sets = [{}]  # always have at least one empty shift set
        shift_names = ["_0"]

        arg_name, arg = args[-1] if len(args) > 0 else (None, None)
        if not isinstance(arg, MDFNode):
            arg_name, arg = args.pop()
            named_shift_sets = _get_shift_sets(arg_name, arg)
            if named_shift_sets:
                shift_names, shift_sets = zip(*named_shift_sets)
                has_shifts = True

        # any remaining arguments are the nodes
        nodes = []
        node_var_names = []
        for arg_name, node in args:
            assert isinstance(node, MDFNode), "%s is not a node" % arg_name
            nodes.append(node)
            node_var_names.append(arg_name)

        curr_ctx = _get_current_context()
        ctxs = [None] * len(nodes)

        if not nodes:
            # get the selected nodes from the viewer
            if _viewer_imported:
                selected = viewer.get_selected()
                ctxs, nodes = zip(*selected)

                for i, (ctx, node) in enumerate(selected):
                    assert ctx.is_shift_of(curr_ctx), \
                        "selected node '%s' is not in the current context" % node.name

                    # replace any contexts that are simply the current context with None
                    # so that shifting works correctly
                    if ctx is curr_ctx:
                        ctxs[i] = None

        # if there are shifts then all the contexts have to be None otherwise the
        # shifts won't work correctly. This could be relaxed later if it causes problems,
        # but for now this makes the code simpler.
        if has_shifts:
            assert np.array([x is None for x in ctxs]).all(), \
                "Can't apply shifts when contexts are explicitly specified"

        # list df_builders, one per node or group of nodes
        callbacks = []
        df_builders = []

        if widepanel or not single_df:
            # build multiple dataframes
            for node, ctx in zip(nodes, ctxs):
                if ctx is None:
                    df_builder = DataFrameBuilder([node], filter=True)
                else:
                    df_builder = DataFrameBuilder([node], contexts=[ctx], filter=True)
                df_builders.append(df_builder)
        else:
            # build a single dataframe
            if np.array([x is None for x in ctxs]).all():
                df_builder = DataFrameBuilder(nodes, filter=True)
            else:
                df_builder = DataFrameBuilder(nodes, contexts=ctxs, filter=True)
            df_builders.append(df_builder)

        # add all the dataframe builders to the callbacks
        callbacks.extend(df_builders)

        root_ctx = curr_ctx.get_parent() or curr_ctx
        date_range = pd.DateRange(start, end, offset=self.__timestep)

        # Add a progress bar to the callbacks
        callbacks.append(ProgressBar(date_range[0], date_range[-1]))

        shifted_ctxs = run(date_range, callbacks, ctx=root_ctx,
                           shifts=shift_sets, num_processes=num_processes)

        if not has_shifts:
            shifted_ctxs = [root_ctx]

        # when returning a list of results because multiple shifts have been specified
        # use a named tuple with the items being the names of the shifts
        tuple_ctr = tuple
        if has_shifts:
            # Currying hell yeah
            tuple_ctr = partial(ShiftedResultsTuple, shift_names)

        if widepanel:
            wps = []
            for shift_name, shift_set, shifted_ctx in zip(shift_names, shift_sets, shifted_ctxs):
                wp_dict = {}
                for node_var_name, df_builder in zip(node_var_names, df_builders):
                    wp_dict[node_var_name] = df_builder.get_dataframe(shifted_ctx)
                wp = pd.WidePanel.from_dict(wp_dict)
                if has_shifts:
                    wp = WidePanelWithShiftSet(wp, shift_name, shift_set)
                wps.append(wp)

            if len(wps) == 1:
                return wps[0]
            return tuple_ctr(*wps)

        # build a list of lists of dataframes
        # [[dfs for one shift set], [dfs for next shift set], ...]
        df_lists = []
        for shift_name, shift_set, shifted_ctx in zip(shift_names, shift_sets, shifted_ctxs):
            dfs = []
            for df_builder in df_builders:
                df = df_builder.get_dataframe(shifted_ctx)
                if has_shifts:
                    df = DataFrameWithShiftSet(df, shift_name, shift_set)
                dfs.append(df)
            df_lists.append(dfs)

        if single_df:
            # flatten into a single list (there should be one dataframe per shift)
            dfs = reduce(operator.add, df_lists, [])
            if len(dfs) == 1:
                return dfs[0]
            return tuple_ctr(*dfs)

        if len(df_lists) == 1:
            return df_lists[0]
        return tuple_ctr(*df_lists)

    @line_magic
    def mdf_df(self, parameter_s=""):
        """
        Return a pandas dataframe of nodes evaluated over a date range.

        %mdf_df <start_date> <end_date> [nodes...] [[node=shift,...]]

        If no nodes are specified and the viewer is active the currently
        selected nodes are used.
        """
        return self._magic_dataframe(parameter_s, widepanel=False, single_df=True)

    @line_magic
    def mdf_dfs(self, parameter_s=""):
        """
        Return a list of pandas dataframes of nodes evaluated over a date range.

        %mdf_dfs <start_date> <end_date> [nodes...] [[node=shift,...]]

        If no nodes are specified and the viewer is active the currently
        selected nodes are used.
        """
        return self._magic_dataframe(parameter_s, widepanel=False, single_df=False)

    @line_magic
    def mdf_wp(self, parameter_s=""):
        """
        Return a pandas widepanel of nodes evaluated over a date range.

        %mdf_wp <start_date> <end_date> [nodes...] [[node=shift,...]]

        If no nodes are specified and the viewer is active the currently
        selected nodes are used.
        """
        return self._magic_dataframe(parameter_s, widepanel=True)

    @line_magic
    def mdf_xl(self, parameter_s=""):
        """
        Export to excel a list of nodes evaluated over a date range, or DataFrames.

        %mdf_xl <start_date> <end_date> [nodes...]

        If no nodes are specified and the viewer is active the currently
        selected nodes are used.

        Alternatively, export one or more DataFrames directly:

        %mdf_xl df1 [, dfN ...]
        """
        args = tokenize(parameter_s)
        args = [_try_eval(x, self.shell.user_global_ns, self.shell.user_ns) for x in args]
        if not args:
            raise AssertionError("Usage: %mdf_xl <start> <end> nodes...")

        dfs = []
        # if there is at least one DataFrame at the beginning, export them directly
        if args and isinstance(args[0], pd.DataFrame):
            dfs.extend((x for x in args if isinstance(x, pd.DataFrame)))
        else:
            # create one DataFrame of nodes evaluated over a date range
            dfs.append(self.mdf_df(parameter_s))

        excel.export_dataframe(dfs)

        # return the DataFrame if there is only 1, the complete list otherwise. can be [].
        if len(dfs) == 1:
            return dfs[0]
        return dfs

    @line_magic
    def mdf_plot(self, parameter_s=""):
        """
        Plot a list of nodes evaluated over a date range.

        %mdf_plot <start_date> <end_date> [nodes...]

        If no nodes are specified and the viewer is active the currently
        selected nodes are used.
        """
        df = self.mdf_df(parameter_s)
        df.plot()

    @line_magic
    def mdf_vars(self, parameter_s=""):
        """
        Print the values of varnodes a node or list of nodes are dependent on.

        %mdf_vars [<node>] [[category,...]]

        If no nodes are specified all nodes that are currently known about
        will be examined.
        """
        categories = None
        if parameter_s.strip().endswith("]") and "[" in parameter_s:
            parameter_s, categories = parameter_s.rstrip("]").rsplit("[", 1)
            categories = [x.strip() for x in categories.strip().split(",")]
            parameter_s = parameter_s.strip()

        nodes = parameter_s.strip().split(" ") if parameter_s else []
        nodes = map(lambda x: eval(x, self.shell.user_global_ns, self.shell.user_ns), nodes)

        curr_ctx = _get_current_context()
        for node in nodes:
            if not (node.has_value(curr_ctx) or node.was_called(curr_ctx)):
                _log.warn("%s has not yet been evaluated" % node.name)

        # get all the varnode values
        varnode_values = {}

        def vistor(node, ctx):
            if isinstance(node, MDFVarNode) \
                    and ctx is curr_ctx \
                    and node is not now:
                varnode_values[node] = ctx[node]
            return True

        curr_ctx.visit_nodes(vistor,
                             root_nodes=nodes or None,
                             categories=categories or None)

        # put the results in a dataframe with the ctx ids as columns
        nodes = sorted(varnode_values.keys(),
                       key=lambda x: (sorted(x.categories), x.short_name))
        df = pd.DataFrame(data={}, index=nodes, columns=["Value", "Category"], dtype=object)
        for node, value in varnode_values.items():
            df["Value"][node] = value
            df["Category"][node] = ",".join(["%s" % (c or "") for c in sorted(node.categories)])

        if df.index.size == 0:
            print("No matching dependencies found - has the node been evaluated?")
            return

        df.index = [n.short_name for n in df.index]
        print(df.to_string(float_format=lambda x: "%.3f" % x))
def testRollback2(self):
    self.assertEqual(BDay(10).rollback(datetime(2008, 1, 5)),
                     datetime(2008, 1, 4))
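# A standalone sketch of the rollback/rollforward semantics covered by the
# tests above, assuming BDay from pandas.tseries.offsets: business days are
# left untouched, while Saturday 2008-01-05 rolls back to the previous Friday
# and forward to the following Monday.
from datetime import datetime
from pandas.tseries.offsets import BDay

assert BDay(10).rollback(datetime(2008, 1, 1)) == datetime(2008, 1, 1)
assert BDay(10).rollback(datetime(2008, 1, 5)) == datetime(2008, 1, 4)
assert BDay(10).rollforward(datetime(2008, 1, 5)) == datetime(2008, 1, 7)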