def testLastWeekDay(self):
    """should return yesterday or friday if today is monday"""
    # FIX: leading-zero literals (05) replaced by plain decimals — they are
    # octal in Python 2 (same value here, so behavior is unchanged) and a
    # syntax error in Python 3.
    monday = datetime.date(2013, 5, 20)
    friday = datetime.date(2013, 5, 17)
    today = datetime.date(2013, 5, 22)
    yesterday = datetime.date(2013, 5, 21)
    self.assertEquals(yesterday, DateHelper.get_last_week_day(today), 'if not monday, should return yesterday')
    self.assertEquals(friday, DateHelper.get_last_week_day(monday), 'if monday should return last friday')
def parse_planned(self, plan, start_date, end_date):
    """Build a {'YYYY-MM-DD': time} mapping from the sprint "planned" config.

    `plan` is either a flat list of numbers (one value per business day
    between start_date and end_date) or a list of {'date': ..., 'time': ...}
    entries whose 'date' field is expanded via self.parse_date.

    :raises SyntaxError: if a flat list doesn't hold exactly one value per
                         business day
    """
    planned = dict()
    # Check for single value list (like [1,2,3]).
    # The `and plan` guard avoids an IndexError on plan[0] for an empty list
    # (an empty plan simply yields an empty dict via the else branch).
    if isinstance(plan, list) and plan and isinstance(plan[0], (int, float)):
        days = DateHelper.get_all_days(start_date, end_date, False)
        # BUG FIX: format arguments were swapped — the first placeholder is
        # the number of business days, the second the number of values given
        if len(plan) < len(days):
            raise SyntaxError(
                "The planned list must contain one value per business days ({}), "
                "there are not enough values({})".format(len(days), len(plan))
            )
        if len(plan) > len(days):
            raise SyntaxError(
                "The planned list must contain one value per business days ({}), "
                "there are too many values ({})".format(len(days), len(plan))
            )
        for index, date in enumerate(days):
            planned[date.strftime("%Y-%m-%d")] = plan[index]
    # Check for date list
    else:
        for f in plan:
            dates = self.parse_date(f['date'])
            time = f['time']
            for d in dates:
                planned[d.strftime("%Y-%m-%d")] = time
    return planned
def parse_planned(self, plan, start_date, end_date):
    """Return a dict mapping 'YYYY-MM-DD' strings to planned time values.

    Accepts either a flat list of numbers (one per business day between
    start_date and end_date) or a list of {'date': ..., 'time': ...} dicts.

    :raises SyntaxError: when a flat list length doesn't match the number of
                         business days
    """
    planned = dict()
    # Check for single value list (like [1,2,3]); `and plan` prevents an
    # IndexError on plan[0] when the list is empty
    if isinstance(plan, list) and plan and isinstance(plan[0], (int, float)):
        days = DateHelper.get_all_days(start_date, end_date, False)
        # BUG FIX: the message announces the business-day count first and the
        # supplied-value count second; the format args were passed swapped
        if len(plan) < len(days):
            raise SyntaxError(
                "The planned list must contain one value per business days ({}), "
                "there are not enough values({})".format(
                    len(days), len(plan)))
        if len(plan) > len(days):
            raise SyntaxError(
                "The planned list must contain one value per business days ({}), "
                "there are too many values ({})".format(
                    len(days), len(plan)))
        for index, date in enumerate(days):
            planned[date.strftime("%Y-%m-%d")] = plan[index]
    # Check for date list
    else:
        for f in plan:
            dates = self.parse_date(f['date'])
            time = f['time']
            for d in dates:
                planned[d.strftime("%Y-%m-%d")] = time
    return planned
def _output(self, sprint, dates, graph_series, graph_end_date): # convert all y series to percents percent_series = OrderedDict() for name, serie in graph_series.items(): percent_series[name] = serie.get_values_as_percent() # add future days (up to graph_end_date) so that the graph looks more realistic if len(dates) and sprint.get_zebra_data('end_date') > dates[-1]: today = datetime.date.today() future_dates = DateHelper.get_future_days( sprint.get_zebra_data('end_date'), dates[-1] != today, False) for date in future_dates: dates.append(date) # generate main graph (sprint burnup) chart = SprintBurnUpChart.get_chart(dates, percent_series) # generate top graphs (result per serie) top_graph_series = ['md', 'sp', 'bv'] result_charts = {} for name, serie in graph_series.items(): if name in top_graph_series: result_charts[name] = ResultPerValuePie.get_chart( (serie.get_max_value(), serie.get_commited_value())) # collect all needed values for graph output args = [] args.append('{} ({})'.format( sprint.get_jira_data('sprint_name').replace('+', ' '), sprint.name)) args.append('Velocity: actual: {:.2f} expected: {:.2f}'.format( sprint.get_actual_velocity(), sprint.get_expected_velocity())) for name in result_charts: serie = graph_series.get(name) args.append( '{serie_name} {percent:.0f}%<br/>({result:.0f}/{max_value:.0f})' .format( serie_name=serie.name.upper(), percent=serie.get_result_as_percent(), result=serie.get_max_value(), max_value=serie.get_commited_value(), )) args.append(result_charts[name].render(is_unicode=True)) args.append(chart.render(is_unicode=True)) # generate the html structure and embed all values html_generator = SprintBurnupHtmlOutput(result_charts.keys()) html = html_generator.get_html_structure().format(*args) # write the graph to file path = 'sprint_burnup-%s-%s.html' % (UrlHelper.slugify( sprint.name), datetime.datetime.now().strftime("%Y%m%d")) graph_location = OutputHelper.write_to_file(path, html) print 'Your graph is available at %s' % 
graph_location
def _output(self, sprint, dates, graph_series, graph_end_date): # convert all y series to percents percent_series = OrderedDict() for name, serie in graph_series.items(): percent_series[name] = serie.get_values_as_percent() # add future days (up to graph_end_date) so that the graph looks more realistic if len(dates) and sprint.get_zebra_data('end_date') > dates[-1]: today = datetime.date.today() future_dates = DateHelper.get_future_days(sprint.get_zebra_data('end_date'), dates[-1] != today, False) for date in future_dates: dates.append(date) # generate main graph (sprint burnup) chart = SprintBurnUpChart.get_chart(dates, percent_series) # generate top graphs (result per serie) top_graph_series = ['md', 'sp', 'bv'] result_charts = {} for name, serie in graph_series.items(): if name in top_graph_series: result_charts[name] = ResultPerValuePie.get_chart((serie.get_max_value(), serie.get_commited_value())) # collect all needed values for graph output args = [] args.append('{} ({})'.format(sprint.get_jira_data('sprint_name').replace('+', ' '), sprint.name)) args.append( 'Velocity: actual: {:.2f} expected: {:.2f}'.format( sprint.get_actual_velocity(), sprint.get_expected_velocity() ) ) for name in result_charts: serie = graph_series.get(name) args.append('{serie_name} {percent:.0f}%<br/>({result:.0f}/{max_value:.0f})'.format( serie_name=serie.name.upper(), percent=serie.get_result_as_percent(), result=serie.get_max_value(), max_value=serie.get_commited_value(), )) args.append(result_charts[name].render(is_unicode=True)) args.append(chart.render(is_unicode=True)) # generate the html structure and embed all values html_generator = SprintBurnupHtmlOutput(result_charts.keys()) html = html_generator.get_html_structure().format(*args) # write the graph to file path = 'sprint_burnup-%s-%s.html' % ( UrlHelper.slugify(sprint.name), datetime.datetime.now().strftime("%Y%m%d") ) graph_location = OutputHelper.write_to_file(path, html) print 'Your graph is available at %s' % 
graph_location
def get_start_and_end_date(self, dates):
    """
    Get a tuple with start and end dates. Default is none for end date, and last week-day for start_date
    :param dates:list of dates
    :return:tuple (start,end)
    """
    date_objects = [dateutil.parser.parse(d, dayfirst=True) for d in dates]
    # default values is None for end_date and last week-day for start_date
    start_date = None
    end_date = None
    # FIX: a list comprehension can never yield None, so the previous
    # `is None` test was dead code; only emptiness needs checking
    if not date_objects:
        date_objects.append(DateHelper.get_last_week_day())
    if len(date_objects) == 1:
        start_date = ZebraHelper.zebra_date(date_objects[0])
    elif len(date_objects) == 2:
        # two dates given: order them (lowest is the start)
        start_date = ZebraHelper.zebra_date(min(date_objects))
        end_date = ZebraHelper.zebra_date(max(date_objects))
    return (start_date, end_date)
def testStartAndEndDates(self):
    """should return a tuple with start and end dates as zebra dates"""
    base_command = BaseCommand()
    parse = base_command.get_start_and_end_date
    # both dotted and european input formats are accepted
    self.assertEquals(('2013-05-22', None), parse(['2013.05.22']), 'test input format 1')
    self.assertEquals(('2013-05-22', None), parse(['22.05.2013']), 'test input format 2')
    # an empty input still yields a start date (defaults to last week-day)
    self.assertNotEquals(
        (None, None), parse([]),
        'if no date is specified, date_start should not be None')
    expected_start = ZebraHelper.zebra_date(DateHelper.get_last_week_day())
    self.assertEquals(
        (expected_start, None), parse([]),
        'if no date is specified, date_start should be last week day')
def run(self, args): sprint_name = self.get_sprint_name_from_args_or_current(args.sprint_name) sprint = self.ensure_sprint_in_config(sprint_name) # end date for the graph can be specified via the --date arg. Defaults to yesterday try: graph_end_date = dateutil.parser.parse(args.date[0], dayfirst=True).date() except Exception as e: graph_end_date = datetime.date.today() - datetime.timedelta(days=1) # start fetching zebra data print 'Start fetching Zebra' zebra_manager = self.get_zebra_manager() timesheets = zebra_manager.get_timesheets_for_sprint(sprint) sprint.timesheet_collection = timesheets zebra_days = timesheets.group_by_day() print 'End Zebra' # start fetching jira data print 'Start fetching Jira' Story.closed_status_ids = sprint.get_closed_status_codes() jira_manager = self.get_jira_manager() stories = jira_manager.get_stories_for_sprint_with_end_date(sprint) sprint.story_collection = stories print 'End Jira' # define x serie dates = [] # define all y series serie_collection = SprintBurnupSeries() sprint.serie_collection = serie_collection # set commited value by serie serie_collection.get('md').ideal_value = float(sprint.commited_man_days) * 8 serie_collection.get('sp').ideal_value = stories.get_commited('sp') serie_collection.get('bv').ideal_value = stories.get_commited('bv') # loop through all sprint days and gather values days = DateHelper.get_all_days(sprint.get_zebra_data('start_date'), sprint.get_zebra_data('end_date'), True) for date in days: time_without_forced = 0 zebra_day = zebra_days.get(str(date)) if zebra_day is not None: time_without_forced = zebra_day.time # check for forced zebra values total_time = sprint.get_forced_data(str(date), time_without_forced) planned_time = sprint.get_planned_data(str(date)) # output data for this day to the console (useful but not necessary for this command if total_time != 0: print date entries_per_user = zebra_day.get_entries_per_user() for user, time in entries_per_user.items(): print "%s : %s" % (user, time) 
planned_str = '' if planned_time is None else '(Planned: ' + str(planned_time) + ')' # print total time per day (with and/or without forced values) if time_without_forced == total_time: print 'Total: %s %s' % (total_time, planned_str) else: print 'Total (without forced data): %s' % time_without_forced print 'Total including forced data: %s %s' % (total_time, planned_str) print '' # end of output # get jira achievement for this day (bv/sp done) jira_data = stories.get_achievement_for_day(str(date)) # if we have some time, story closed for this day or planned time, add it to graph data if jira_data is not None or total_time != 0 or planned_time is not None: dates.append(date) for serie in serie_collection.values(): # only add data for dates > graph_end_date for "planned" (not md, sp, bv...) if serie.name == 'planned': serie.cumulate(planned_time) elif serie.name == 'md': if date <= graph_end_date: # for md, sp, bv dont add data if date is after graph_end_date serie.cumulate(total_time) else: if date <= graph_end_date: # for md, sp, bv dont add data if date is after graph_end_date serie.cumulate(None if jira_data is None else jira_data[serie.name]) # get only meaningfull series (ie. don't use BV if the team doesnt use it) graph_series = serie_collection.get_series_for_chart() self._output(sprint, dates, graph_series, graph_end_date)
def run(self, args): sprint_name = self.get_sprint_name_from_args_or_current( args.sprint_name) sprint = self.ensure_sprint_in_config(sprint_name) # end date for the graph can be specified via the --date arg. Defaults to yesterday try: graph_end_date = dateutil.parser.parse(args.date[0], dayfirst=True).date() except Exception as e: graph_end_date = datetime.date.today() - datetime.timedelta(days=1) # start fetching zebra data print 'Start fetching Zebra' zebra_manager = self.get_zebra_manager() timesheets = zebra_manager.get_timesheets_for_sprint(sprint) sprint.timesheet_collection = timesheets zebra_days = timesheets.group_by_day() print 'End Zebra' # start fetching jira data print 'Start fetching Jira' Story.closed_status_ids = sprint.get_closed_status_codes() jira_manager = self.get_jira_manager() stories = jira_manager.get_stories_for_sprint_with_end_date(sprint) sprint.story_collection = stories print 'End Jira' # define x serie dates = [] # define all y series serie_collection = SprintBurnupSeries() sprint.serie_collection = serie_collection # set commited value by serie serie_collection.get('md').ideal_value = float( sprint.commited_man_days) * 8 serie_collection.get('sp').ideal_value = stories.get_commited('sp') serie_collection.get('bv').ideal_value = stories.get_commited('bv') # loop through all sprint days and gather values days = DateHelper.get_all_days(sprint.get_zebra_data('start_date'), sprint.get_zebra_data('end_date'), True) for date in days: time_without_forced = 0 zebra_day = zebra_days.get(str(date)) if zebra_day is not None: time_without_forced = zebra_day.time # check for forced zebra values total_time = sprint.get_forced_data(str(date), time_without_forced) planned_time = sprint.get_planned_data(str(date)) # output data for this day to the console (useful but not necessary for this command if total_time != 0: print date entries_per_user = zebra_day.get_entries_per_user() for user, time in entries_per_user.items(): print "%s : %s" % (user, 
time) planned_str = '' if planned_time is None else '(Planned: ' + str( planned_time) + ')' # print total time per day (with and/or without forced values) if time_without_forced == total_time: print 'Total: %s %s' % (total_time, planned_str) else: print 'Total (without forced data): %s' % time_without_forced print 'Total including forced data: %s %s' % (total_time, planned_str) print '' # end of output # get jira achievement for this day (bv/sp done) jira_data = stories.get_achievement_for_day(str(date)) # if we have some time, story closed for this day or planned time, add it to graph data if jira_data is not None or total_time != 0 or planned_time is not None: dates.append(date) for serie in serie_collection.values(): # only add data for dates > graph_end_date for "planned" (not md, sp, bv...) if serie.name == 'planned': serie.cumulate(planned_time) elif serie.name == 'md': if date <= graph_end_date: # for md, sp, bv dont add data if date is after graph_end_date serie.cumulate(total_time) else: if date <= graph_end_date: # for md, sp, bv dont add data if date is after graph_end_date serie.cumulate(None if jira_data is None else jira_data[serie.name]) # get only meaningfull series (ie. don't use BV if the team doesnt use it) graph_series = serie_collection.get_series_for_chart() self._output(sprint, dates, graph_series, graph_end_date)