def test_dao_get_sprints(dbsession):
    m1 = Metrics('1', 1, '2018-11-01T12:00', '2018-11-08T12:00', 'jira', timedelta(days=1), 1, 1, 0)
    m2 = Metrics('1', 2, '2018-11-09T12:00', '2018-11-16T12:00', 'jira', timedelta(days=1), 1, 1, 0)
    dao_add_sprint(m1)
    dao_add_sprint(m2)

    sprints = dao_get_sprints()

    assert len(sprints) == 2

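# The tests here assume a `dbsession` pytest fixture that yields a SQLAlchemy
# session and leaves the database clean between tests. A minimal sketch of
# such a fixture, assuming a `db` fixture exposing the SQLAlchemy handle (the
# names and rollback strategy are assumptions, not the project's conftest):
import pytest

@pytest.fixture
def dbsession(db):
    # start a savepoint so each test sees an empty TeamMetric table
    db.session.begin_nested()
    yield db.session
    # undo anything the test wrote
    db.session.rollback()
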
def sample_metrics(dbsession):
    m1 = Metrics('1', 1, '2018-11-01T12:00', '2018-11-08T12:00', 'jira', timedelta(days=1), 1, 1, 0)
    m2 = Metrics('1', 2, '2018-11-09T12:00', '2018-11-16T12:00', 'jira', timedelta(days=1), 1, 1, 0)
    dao_add_sprint(m1)
    dao_add_sprint(m2)
    return [m1, m2]

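# Because sample_metrics takes the dbsession fixture as an argument, it is
# presumably registered as a pytest fixture itself (decorated with
# @pytest.fixture in the real conftest -- an assumption here). A test would
# then consume it by name and receive the two persisted Metrics objects:
def test_get_sprints_with_fixture(sample_metrics):  # hypothetical test
    sprints = dao_get_sprints()
    assert len(sprints) == len(sample_metrics)
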
def test_upsert_sprint_can_update_metric(dbsession):
    m = Metrics('upsert_id', '1', '2018-11-01T12:00', '2018-11-08T12:00', 'jira', timedelta(days=1), 1, 1, 0)
    dao_upsert_sprint(m)

    db_metrics = dbsession.query(TeamMetric).all()
    assert len(db_metrics) == 1

    m.num_completed = 2
    m.num_incomplete = 5
    dao_upsert_sprint(m)

    db_metrics = dbsession.query(TeamMetric).all()
    assert len(db_metrics) == 1
    assert db_metrics[0].num_completed == m.num_completed
    assert db_metrics[0].num_incomplete == m.num_incomplete

def test_upsert_sprint_can_add_metric(dbsession):
    m = Metrics('upsert_id', '1', '2018-11-01T12:00', '2018-11-08T12:00', 'jira', timedelta(days=1), 1, 1, 0)
    dao_upsert_sprint(m)

    db_metrics = dbsession.query(TeamMetric).all()
    assert len(db_metrics) == 1
    assert db_metrics[0].project_id == m.project_id
    assert db_metrics[0].sprint_id == m.sprint_id

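# A minimal sketch of what dao_upsert_sprint is expected to do, based on the
# two tests above: look up a TeamMetric by project and sprint id, update it if
# found, insert it otherwise. The session handle and column mapping are
# assumptions, not the project's actual DAO code.
def dao_upsert_sprint(metric):
    team_metric = db.session.query(TeamMetric).filter_by(
        project_id=metric.project_id, sprint_id=metric.sprint_id
    ).one_or_none()
    if team_metric:
        # update the mutable fields exercised by the update test
        team_metric.num_completed = metric.num_completed
        team_metric.num_incomplete = metric.num_incomplete
    else:
        db.session.add(TeamMetric(
            project_id=metric.project_id,
            sprint_id=metric.sprint_id,
            num_completed=metric.num_completed,
            num_incomplete=metric.num_incomplete,
            # ...remaining columns mapped from the Metrics object
        ))
    db.session.commit()
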
def test_dump_json():
    from unittest.mock import patch, mock_open

    m = Metrics('1', 'test_sprint', '2018-11-01T12:00', '2018-11-08T12:00', 'jira', timedelta(days=1), '1', 1, 0)

    with patch("builtins.open", mock_open()) as mock_file:
        dump_json('test', [m])

        mock_file.assert_called_with("data/test.json", 'w')
        mock_file().write.assert_called_once_with(
            '[{"project_id": "1", "sprint_id": "test_sprint", "started_on": "2018-11-01T12:00", '
            '"ended_on": "2018-11-08T12:00", "source": "jira", "avg_cycle_time": "1 days 00:00:00", '
            '"process_cycle_efficiency": "1", "num_completed": 1, "num_incomplete": 0}]'
        )

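# A sketch of a dump_json compatible with the assertion above: serialise each
# Metrics object's attributes to data/<name>.json in a single write, falling
# back to str() for values json can't encode. The "1 days 00:00:00" in the
# expected output suggests avg_cycle_time is a pandas Timedelta, whose str()
# uses that form; that, and serialising via __dict__, are assumptions.
import json

def dump_json(filename, metrics):
    with open("data/{}.json".format(filename), 'w') as f:
        f.write(json.dumps([m.__dict__ for m in metrics], default=str))
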
def test_add_sprint(dbsession):
    m = Metrics('1', 1, '2018-11-01T12:00', '2018-11-08T12:00', 'jira', timedelta(days=1), 1, 1, 0)
    dao_add_sprint(m)

    sprints = dao_get_sprints()

    assert len(sprints) == 1

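# Minimal sketches of the two DAO calls this test exercises, assuming the
# same SQLAlchemy session and TeamMetric model as above (not the project's
# actual implementations):
def dao_add_sprint(metric):
    db.session.add(TeamMetric(
        project_id=metric.project_id,
        sprint_id=metric.sprint_id,
        # ...remaining columns mapped from the Metrics object
    ))
    db.session.commit()

def dao_get_sprints():
    return db.session.query(TeamMetric).all()
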
def get_metrics(self, year=None, quarter=None):
    print("Pivotal")

    iteration_start = num_iterations = 0
    if year and quarter:
        iteration_start, num_iterations = self.get_iteration_range(year, quarter)
        print("iterations: {} to {}".format(iteration_start, iteration_start + num_iterations))

    metrics = []
    for iteration in self.pivotal.get_project_iterations(
            offset=iteration_start, limit=num_iterations):
        cycle_time = process_cycle_efficiency = None
        num_stories_complete = num_stories_incomplete = 0

        try:
            print("\nIteration: {} - {}".format(iteration['start'], iteration['finish']))

            for story in iteration['stories']:
                if not story.get('accepted_at'):
                    continue

                print(story['name'])

                started_at = self.get_started_at(story['id'])
                if not started_at:
                    continue

                _cycle_time = get_time_diff(started_at, story['accepted_at'])
                if not _cycle_time:
                    continue

                if cycle_time:
                    cycle_time += _cycle_time
                else:
                    cycle_time = _cycle_time
                print(' cycle_time: {}'.format(_cycle_time))

                _process_cycle_efficiency = get_process_cycle_efficiency(
                    _cycle_time, self.get_blocked_time(story['id']))
                if _process_cycle_efficiency < 0:
                    # This happens when a story gets started, has blockers, so
                    # gets unstarted in a previous iteration, then gets
                    # restarted in this iteration and accepted. We need to
                    # decide whether to include the cycle time of the previous
                    # iteration; also, should teams unstart and then restart
                    # stories in a different iteration at all?
                    continue

                print(" process_cycle_efficiency: {}".format(_process_cycle_efficiency))

                if process_cycle_efficiency:
                    process_cycle_efficiency += _process_cycle_efficiency
                else:
                    process_cycle_efficiency = _process_cycle_efficiency
        except ApiError as e:
            print('api error', e)

        num_stories_complete = len([
            s for s in iteration['stories'] if s['current_state'] == 'accepted'
        ])
        num_stories_incomplete = len(iteration['stories']) - num_stories_complete

        print("\n Number of accepted stories: {}".format(num_stories_complete))
        print("\n Number of incomplete stories: {}".format(num_stories_incomplete))

        m = Metrics(
            self.pivotal.project_id,
            iteration["number"],
            get_date_string(iteration["start"]),
            get_date_string(iteration["finish"]),
            "pivotal",
            0 if not cycle_time else cycle_time / num_stories_complete,
            (process_cycle_efficiency / num_stories_complete) if num_stories_complete else 0,
            num_stories_complete,
            num_stories_incomplete)
        metrics.append(m)

    return metrics

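# Sketches of the helpers this method leans on, inferred from how they are
# called; the actual implementations live elsewhere in the repo, so treat the
# signatures and timestamp format as assumptions. Process cycle efficiency is
# value-added time over total cycle time, which is why it can go negative
# above: the blocked time recorded for a story can exceed the cycle time
# measured within this iteration.
from datetime import datetime

def get_time_diff(start, end, fmt="%Y-%m-%dT%H:%M:%SZ"):
    # returns the timedelta between two Pivotal timestamp strings
    return datetime.strptime(end, fmt) - datetime.strptime(start, fmt)

def get_process_cycle_efficiency(cycle_time, blocked_time=None):
    # PCE = (cycle time - blocked time) / cycle time; 1 means never blocked
    if not blocked_time:
        return 1
    return (cycle_time - blocked_time) / cycle_time
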
def get_metrics(self, year=None, quarter=None):
    board = self.trello.get_board(self.board_id)
    print(board.name)

    sprints = [l for l in board.list_lists() if l.name == SPRINTS][0]
    sprints_cards = sorted(sprints.list_cards(), key=lambda d: d.description)

    done_lists = []
    open_lists = [l for l in board.list_lists() if not l.closed]
    for _list in open_lists:
        sprint_card = [w for w in sprints_cards if w.name == _list.name]
        if sprint_card:
            _list.start_date = sprint_card[0].description.split(' - ')[0]
            _list.end_date = sprint_card[0].description.split(' - ')[1]
            done_lists.append(_list)
    done_lists = sorted(done_lists, key=lambda d: d.start_date)

    cards_in_done = 0
    metrics = []
    for _list in done_lists:
        if self.sprint_id and _list.id != self.sprint_id:
            continue

        print(" {}".format(_list.name))

        cycle_time = timedelta(days=0)
        process_cycle_efficiency = 0

        cards = _list.list_cards()
        for card in cards:
            print(" {}".format(card.name))

            in_progress_time = self.get_in_progress_time(card)
            if not in_progress_time:
                print('*** no progress on card', card.name)
                continue

            blocked_time = self.get_blocked_time(card)

            # cycle time for this card; the running total only feeds the
            # average, so the per-card value is what goes into the PCE
            card_cycle_time = in_progress_time + blocked_time
            cycle_time += card_cycle_time
            process_cycle_efficiency += get_process_cycle_efficiency(
                card_cycle_time, blocked_time)

        cards_in_done = len(cards)
        num_incomplete = self.get_cards_incomplete(
            board.list_lists(), _list.start_date, _list.end_date)

        m = Metrics(
            self.board_id,
            _list.id,
            _list.start_date,
            f"{_list.end_date} 23:59:59",
            "trello",
            0 if not cycle_time else cycle_time / cards_in_done,
            (process_cycle_efficiency / cards_in_done) if cards_in_done else 0,
            cards_in_done,
            num_incomplete)
        metrics.append(m)

        print('Cards in Done: {}'.format(cards_in_done))

    return metrics

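# A hedged end-to-end sketch tying the pieces together: pull metrics from one
# of the sources, export them, and persist them. The TrelloBoard class name,
# its constructor arguments, and the driver function are assumptions;
# dump_json and dao_upsert_sprint are the functions exercised by the tests
# above.
def collect_trello_metrics(board_id, sprint_id=None):  # hypothetical driver
    tb = TrelloBoard(board_id=board_id, sprint_id=sprint_id)
    metrics = tb.get_metrics()
    dump_json('trello', metrics)  # writes data/trello.json
    for m in metrics:
        dao_upsert_sprint(m)  # insert or update each sprint's row
    return metrics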