def __init__(self, raw_data):
    """Build an issue wrapper from a raw JIRA RPC issue object.

    Copies the standard fields, resolves custom fields (tester, story
    points, team, onshore BA, desk check, estimation date, epic) and
    normalizes people names from underscore-separated form.
    """
    custom_fields = self.__get_custom_fields__(raw_data)
    self.key = raw_data.key
    self.summary = raw_data.summary
    self.status = self.__get_status_str__(raw_data.status)
    self.description = raw_data.description
    # People names come back underscore-separated; make them human-readable.
    self.assignee = str(raw_data.assignee).replace('_', ' ')
    self.reporter = str(raw_data.reporter).replace('_', ' ')
    self.tester = str(self.__get_custom_field_value__(custom_fields, TESTER)).replace('_', ' ')
    self.story_points = self.__get_custom_field_value__(custom_fields, STORY_POINTS)
    self.team = self.__get_custom_field_value__(custom_fields, TEAM)
    self.onshore_ba = self.__get_custom_field_value__(custom_fields, ONSHORE_BA)
    if self.onshore_ba:
        self.onshore_ba = self.onshore_ba.replace('_', ' ')
    # Plain comparison instead of the opaque (False, True)[...] indexing trick.
    self.desk_check = self.__get_custom_field_value__(custom_fields, DESK_CHECK) == 'Yes'
    # Always define estimation_date up front so readers never hit
    # AttributeError — the original left it unset when parsing failed.
    self.estimation_date = None
    estimation_date = self.__get_custom_field_value__(custom_fields, ESTIMATION_DATE)
    if estimation_date:
        try:
            time_struct = strptime(estimation_date, "%d/%b/%y")
            self.estimation_date = datetime.fromtimestamp(mktime(time_struct))
        except (TypeError, ValueError):
            # strptime raises ValueError on a malformed date string and
            # TypeError on a non-string value; the original caught only
            # TypeError, so bad date text crashed the constructor.
            LOGGER.warn('Issue {key} does not have valid estimation date'.format(key=self.key))
    epic_name = self.__get_custom_field_value__(custom_fields, EPIC_NAME)
    # NOTE(review): key and name deliberately share the same custom-field
    # value here — confirm against the JIRA epic-link configuration.
    self.epic_key = epic_name
    self.epic_name = epic_name
def html_to_data_sets(html):
    """Parse the first HTML table into a list of (heading, cell-text) pairs.

    The table's first row supplies the headings (``th`` cells); every
    subsequent row is zipped against those headings.

    :param html: raw HTML markup containing at least one ``<table>``
    :return: list of data sets, one per data row, each a list of
             ``(heading, cell_text)`` tuples
    """
    soup = BeautifulSoup(html)
    table = soup.find("table")
    headings = [th.get_text() for th in table.find("tr").find_all("th")]
    data_sets = []
    for row in table.find_all("tr")[1:]:
        # Materialize the pairs: on Python 3, zip is a one-shot iterator,
        # so returning it raw would let each data set be consumed only once.
        data_set = list(zip(headings, (td.get_text() for td in row.find_all("td"))))
        data_sets.append(data_set)
    # Log once with the collected total — the original logged inside the
    # loop and measured len(data_set) (the current row) instead of
    # len(data_sets) (the result).
    LOGGER.info('Page contains {count} data sets'.format(count=len(data_sets)))
    return data_sets
def get_confluence_page_content():
    """Fetch the configured Confluence page and return its HTML content."""
    env = confluence_rpc_init(LOGGER)
    auth, client = env['auth'], env['client']
    namespace, page_title = env['namespace'], env['page_title']
    LOGGER.info('Queuing Confluence for page with title {title}'.format(title=page_title))
    # The RPC client hands back a page object whose .content is raw HTML.
    html = client.getPage(auth, namespace, page_title).content
    LOGGER.info('Confluence returned {size} bytes of html data'.format(size=len(html)))
    return html
def store_outdated_issues():
    """Query JIRA for outdated open-sprint issues and persist them.

    Replaces all previously stored OutdatedJiraIssue rows with the
    current query result.
    """
    env = jira_rpc_init(LOGGER)
    client, auth = env['client'], env['auth']
    project_name = env['project_name']
    request = '''Sprint in openSprints() AND project = {project} AND type in (Story, Bug, Improvement) AND status in (Defined,Undefined,"In Progress") AND "Estimation Date" <= endOfDay()'''.format(project=project_name)
    LOGGER.info('Queuing JIRA for outdated issues...')
    # Cap the search at 20 issues (original behavior).
    response = client.getIssuesFromJqlSearch(auth, request, Types.intType(20))
    LOGGER.info('Response contains {count} issues'.format(count=len(response)))
    LOGGER.info('Removing all outdated issues from database...')
    OutdatedJiraIssue.objects.all().delete()
    LOGGER.info('Starting to import issues to database...')
    for issue in JiraUtil.__raw_data_to_issues_list__(response):
        LOGGER.info('\tIssue {key} is being saved in database'.format(key=issue.key))
        JiraUtil.get_filled_in_outdated_jira_issue_obj(issue).save()
def save_table_records_to_database(table_records):
    """Aggregate desk-check statistics over table records and persist them.

    Buckets the records by ``desk_check_status`` ('Pass' / 'Ready' /
    'Failed' / anything else), computes counts, story-point totals and
    whole-number percentages per bucket, and saves one
    DeskCheckStatistic row.

    :param table_records: iterable of objects with ``story_points``,
        ``desk_check_status`` and ``key`` attributes; ``story_points``
        is normalized in place to an int (missing/empty -> 0)
    """
    passed, ready, failed, other = [], [], [], []
    total_sp = 0
    passed_sp = ready_sp = failed_sp = other_sp = 0
    for item in table_records:
        # Normalize story points: missing/empty values count as 0.
        item.story_points = int(item.story_points) if item.story_points else 0
        total_sp += item.story_points
        # The original carried a skip/except-TypeError path here, but after
        # the normalization above story_points is always an int, so that
        # path was unreachable dead code and has been removed.
        if item.desk_check_status == 'Pass':
            passed.append(item)
            passed_sp += item.story_points
        elif item.desk_check_status == 'Ready':
            ready.append(item)
            ready_sp += item.story_points
        elif item.desk_check_status == 'Failed':
            failed.append(item)
            failed_sp += item.story_points
        else:
            other.append(item)
            other_sp += item.story_points

    total = len(table_records)

    def _percent(part, whole):
        # Whole-number percentage; 0.0 when the denominator is zero.
        # The original raised ZeroDivisionError on empty input or when
        # no record carried story points.
        return round(100.0 * part / whole, 0) if whole else 0.0

    passed_percent = _percent(len(passed), total)
    passed_percent_sp = _percent(passed_sp, total_sp)
    ready_percent = _percent(len(ready), total)
    ready_percent_sp = _percent(ready_sp, total_sp)
    failed_percent = _percent(len(failed), total)
    failed_percent_sp = _percent(failed_sp, total_sp)
    # 'Other' takes the remainder so the four buckets sum to exactly 100,
    # absorbing the rounding error of the other three (original behavior).
    other_percent = 100 - passed_percent - ready_percent - failed_percent
    other_percent_sp = 100 - passed_percent_sp - ready_percent_sp - failed_percent_sp

    db_item = DeskCheckStatistic(
        total_count=total, total_count_sp=total_sp,
        other_count=len(other), other_count_sp=other_sp,
        other_percent=other_percent, other_percent_sp=other_percent_sp,
        passed_count=len(passed), passed_count_sp=passed_sp,
        passed_percent=passed_percent, passed_percent_sp=passed_percent_sp,
        ready_count=len(ready), ready_count_sp=ready_sp,
        ready_percent=ready_percent, ready_percent_sp=ready_percent_sp,
        failed_count=len(failed), failed_count_sp=failed_sp,
        failed_percent=failed_percent, failed_percent_sp=failed_percent_sp)
    db_item.save()
def request(self):
    """Run the stored JQL query against JIRA and cache the raw response."""
    env = self.__env
    LOGGER.info('JIRA Advanced Search request: {request}'.format(request=self.__request))
    # Result is kept on the instance rather than returned (original contract).
    self.__response = env['client'].getIssuesFromJqlSearch(
        env['auth'], self.__request, Types.intType(self.__limit))