async def mc_bug(message, issues):
    """Reply to a Discord message with embeds for up to three Mojira issues.

    :param message: the Discord message that triggered the lookup
    :param issues: sequence of tuples whose first element is an issue id;
                   only the first three entries are looked up
    """
    jira = JIRA(
        server="https://bugs.mojang.com",
        auth=(mojira_username, mojira_password),
    )
    try:
        for issueid in issues[:3]:
            try:
                issue = jira.issue(issueid[0])
                embed = discord.Embed(
                    color=0xA7D9FC,
                    title=issueid[0].upper(),
                    description=f"**{issue.fields.summary}**",
                    url=f"https://bugs.mojang.com/browse/{issueid[0]}",
                )
                embed.add_field(name="Status", value=issue.fields.status)
                embed.add_field(name="Resolution", value=issue.fields.resolution)
                embed.set_footer(text=f"created: {issue.fields.created[:10]}")
                await message.channel.send(embed=embed)
            except Exception:
                # narrowed from a bare except: a failed lookup falls back to a
                # "does not exist" reply instead of swallowing SystemExit etc.
                try:
                    await message.channel.send(f"{issueid[0]} does not exist")
                except Exception:
                    await message.channel.send(f"f**k off {message.author.mention}")
    finally:
        # close the connection even when sending a reply raised
        jira.close()
def load_users():
    """Fetch every Jira user (including inactive) page by page and persist
    each one via ``jira_model.insert_user``. Errors are logged, never raised.
    """
    # Assign Jira configuration
    j_config = configparser.ConfigParser()
    j_config.read('config.ini')
    username = j_config['jira_config']['username']
    secret_token = j_config['jira_config']['secret_token']
    server = j_config['jira_config']['server']
    # Instantiate Jira connection
    jira = JIRA(server, basic_auth=(username, secret_token))
    start_point = 0
    load = 999  # sentinel > max_results so the loop runs at least once
    max_results = 200
    try:
        # A short page (load < max_results) means we reached the last page.
        while load >= max_results:
            users = jira.search_users(user='******', startAt=start_point,
                                      includeInactive=True,
                                      maxResults=max_results)
            load = sum(1 for e in users)
            logger.info("Fetching users {} - {}".format(
                start_point, start_point + load))
            for u in users:
                jira_model.insert_user(u.name, u.displayName, u.emailAddress,
                                       u.active)
            start_point = start_point + max_results
    except Exception:
        # narrowed from a bare except so SystemExit/KeyboardInterrupt propagate
        logger.exception("Message")
    finally:
        jira.close()
    return
def limited_bug(issueid):
    """Build a compact Discord embed for one Mojira bug.

    :param issueid: Mojira issue key, e.g. ``MC-4``
    :return: a ``discord.Embed`` with author, footer and a one-line summary
    """
    jira_access = JIRA(
        server='https://bugs.mojang.com',
        basic_auth=(mojira_username, mojira_password),
    )
    try:
        # Get the bug as an issue object.
        issue = jira_access.issue(issueid)
        # Now we will get different attributes from the bug and put them
        # into a nice embed.
        status = issue.fields.status
        embed = discord.Embed(
            color=bug_colour_mappings[str(status)],
            title='**{}**: {}'.format(issueid.upper(), issue.fields.summary),
            url=f'https://bugs.mojang.com/browse/{issueid}')
        reporter = issue.fields.reporter.name.replace(" ", "+")
        embed.set_author(
            name=issue.fields.creator,
            icon_url=getattr(issue.fields.reporter.avatarUrls, '48x48'),
            url='https://bugs.mojang.com/secure/ViewProfile.jspa?name={}'.format(
                reporter))
        # Get the creation date and format status, resolution and votes.
        date_time = issue.fields.created.split("T")
        embed.set_footer(
            text='created on {} at {}'.format(date_time[0], date_time[1][:-9]))
        embed.description = '**Status:** {} | **Resolution:** {} | **Votes:** {}'.format(
            status, issue.fields.resolution, issue.fields.votes)
        return embed
    finally:
        # removed debug leftover that printed the private
        # jira_access._session._async_jobs attribute; close() is now
        # guaranteed even when the lookup raises.
        jira_access.close()
def extended_bug(issueid):
    """Build a detailed Discord embed for one Mojira bug (summary, status,
    resolution, votes, versions, labels, category, priority, image).

    :param issueid: Mojira issue key, e.g. ``MC-4``
    :return: a ``discord.Embed``
    """
    jira_access = JIRA(
        server='https://bugs.mojang.com',
        basic_auth=(mojira_username, mojira_password),
    )
    try:
        # get different attributes from the bug
        issue = jira_access.issue(issueid)
        status = issue.fields.status
        embed = limited_bug(issueid)
        embed = set_description(issue, embed, 500)
        embed.add_field(name='Status', value=status)
        embed.add_field(name='Resolution', value=issue.fields.resolution)
        embed.add_field(name='Votes', value=issue.fields.votes)
        embed = add_versions(issue, embed)
        embed = add_labels(issue, embed)
        # fixed: add_category was called twice, adding the category field
        # to the embed twice
        embed = add_category(issue, embed)
        embed = add_priority(issue, embed)
        embed = add_image(issue, embed)
        return embed
    finally:
        # close() is now guaranteed even when a helper raises
        jira_access.close()
def load_issues(jql):
    """Page through every issue matching *jql* and persist each via
    ``jira_model.insert_issue``.

    :param jql: JQL query string
    :return: True on success, False when any step failed (error is logged)
    """
    success = None
    jira = None  # bound up front so the finally clause is safe if setup fails
    try:
        # Assign Jira configuration
        j_config = configparser.ConfigParser()
        j_config.read('config.ini')
        username = j_config['jira_config']['username']
        secret_token = j_config['jira_config']['secret_token']
        server = j_config['jira_config']['server']
        # Instantiate Jira connection
        jira = JIRA(server, basic_auth=(username, secret_token))
        start_point = 0
        stop_point = 99999  # replaced by the real total after the first page
        max_results = 100
        while start_point < stop_point:
            # Get issues with JQL
            all_issues = jira.search_issues(
                jql_str=jql, maxResults=max_results, startAt=start_point,
                fields='key,summary,issuetype,status,project,customfield_10118,'
                       'resolution,created,updated,resolutiondate')
            stop_point = all_issues.total
            logger.info("Fetching {} - {} of {} issues".format(
                start_point, min(start_point + max_results, stop_point),
                stop_point))
            for issue in all_issues:
                # Optional nested fields: fall back to None when absent.
                if hasattr(issue.fields.issuetype, "name"):
                    issue_type = issue.fields.issuetype.name
                else:
                    issue_type = None
                if hasattr(issue.fields.status, "name"):
                    status = issue.fields.status.name
                else:
                    status = None
                if hasattr(issue.fields.resolution, "name"):
                    resolution = issue.fields.resolution.name
                else:
                    resolution = None
                jira_model.insert_issue(
                    issue.fields.summary, issue.key, issue_type, status,
                    issue.fields.project.key, issue.fields.customfield_10118,
                    resolution, issue.fields.created, issue.fields.updated,
                    issue.fields.resolutiondate)
            start_point = start_point + max_results
        success = True
    except Exception:
        # narrowed from a bare except so SystemExit/KeyboardInterrupt propagate
        success = False
        logger.exception("Message")
    finally:
        # fixed: previously raised NameError here when config reading or the
        # JIRA() call itself failed before `jira` was bound
        if jira is not None:
            jira.close()
    return success
def get_tickets_dict(ticket_ids, config):
    '''
    takes in set of ticket_ids and returns dictionary with ticket_ids as keys
    and API data as values

    a ticket_id with a value of 0 will be ignored
    '''
    ticket_dict = {}
    # Prefer basic auth when a username is configured; otherwise fall back
    # to a Personal Access Token.
    uses_basic_auth = config.get('jira_username', None)
    if uses_basic_auth:
        credentials = (config['jira_username'], config['jira_password'])
    else:
        credentials = config['jira_token']
    conn_options = {"server": config['jira_url'], "verify": config['certificate']}
    connection = None
    for ticket_id in ticket_ids:
        # 0 is a placeholder, not a valid ticket -- nothing to fetch.
        if ticket_id == 0:
            continue
        try:
            # Connect lazily: on the first real ticket, or again after a
            # previously failed call reset the connection.
            if connection is None:
                if uses_basic_auth:
                    connection = JIRA(basic_auth=credentials, options=conn_options)
                else:
                    connection = JIRA(token_auth=credentials, options=conn_options)
            fetched = connection.issue(ticket_id)
            status_tag = ('[' + str(fetched.fields.status) + ']').upper()
            ticket_name = ' '.join([status_tag, fetched.fields.summary])
        except Exception as e:
            print("Jira API Call Error: ", e)
            ticket_name = ticket_id
            connection = None
        finally:
            ticket_dict[ticket_id] = {
                'ticket_name': ticket_name,
                'ticket_url': config['jira_url'] + "/browse/" + str(ticket_id)
            }
    # close Jira connection if open
    if connection is not None:
        connection.close()
    return ticket_dict
def get_objects(project, fixver=None, obj_id=None, fields=None):
    """Fetch Story/Bug issues from Jira and map them into plain dicts.

    :param project: project key used when *obj_id* is not given
    :param fixver: fix version to filter on (ignored when *obj_id* is given)
    :param obj_id: optional single issue key; overrides the project query
    :param fields: optional comma-separated field list; 'fixVersions' is
                   always appended when missing
    :return: list of dicts with object_id/desc/rel/track/dev/qa/type keys
    """
    cnf = get_conf('JIRA')
    server = cnf['server_url']
    user_name = cnf['username']
    pwd = cnf['password']
    objs = []
    obj = {}
    jira = None  # bound up front so the finally clause is safe if JIRA() fails
    try:
        if obj_id:
            jql_str = 'issue = "{}" and issuetype in ("Story","Bug") '.format(obj_id)
        else:
            jql_str = ('project = "{}" and issuetype in ("Story","Bug") '
                       'and fixVersion = "{}" '.format(project, fixver))
        jira = JIRA(server=server, basic_auth=(user_name, pwd))
        if not fields:
            fields = ('summary,reporter,issuetype,project,fixVersions,'
                      'customfield_10401,customfield_10402')
        elif fields.find('fixVersions') < 0:
            # fixVersions is needed below for the release filter
            fields += ',fixVersions'
        object_list = jira.search_issues(jql_str=jql_str, fields=fields,
                                         maxResults=200)
        print(len(object_list))
        for item in object_list:
            fix_ver = item.fields.fixVersions
            if (fix_ver and str(fix_ver[0]).strip() == str(fixver).strip()) or obj_id:
                obj['object_id'] = item.key
                obj['object_desc'] = item.fields.summary
                # fixed: guard against an empty fixVersions list when a
                # specific obj_id was requested (previously IndexError)
                obj['object_rel'] = str(fix_ver[0]).strip() if fix_ver else None
                if item.fields.project:
                    obj['object_track'] = item.fields.project.name
                # customfield_10401: developer list, customfield_10402: QA list
                if item.fields.customfield_10401:
                    obj['object_dev'] = ''.join(
                        k.displayName + ' | ' for k in item.fields.customfield_10401)
                else:
                    obj['object_dev'] = None
                if item.fields.customfield_10402:
                    obj['object_qa'] = ''.join(
                        k.displayName + ' | ' for k in item.fields.customfield_10402)
                else:
                    obj['object_qa'] = None
                if item.fields.issuetype:
                    obj['object_type'] = item.fields.issuetype.name
                objs.append(obj.copy())
                obj.clear()
    except Exception as e:
        print(e)
    finally:
        # fixed: previously raised NameError here when JIRA() itself failed
        if jira is not None:
            jira.close()
    return objs
class JiraSession(object):
    """Context-managed wrapper around a basic-auth JIRA connection.

    Use as ``with JiraSession(server, account, password) as s: ...`` — the
    underlying HTTP session is closed on exit.
    """

    def __init__(self, server, account, password):
        self.server = server
        self.account = account
        self.password = password
        self.jira_session = JIRA(server=self.server,
                                 basic_auth=(self.account, self.password))

    def __enter__(self):
        # Fail fast if the authenticated user differs from the requested one.
        assert self.jira_session.current_user() == self.account
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.jira_session.close()

    def get_user(self):
        """Return the currently authenticated user."""
        logging.info(u'Get JIRA Current User')
        return self.jira_session.current_user()

    def get_projects(self):
        """Return all visible projects. <key, name, id>"""
        logging.info(u'Get JIRA Projects')
        return self.jira_session.projects()

    def get_issue_types(self):
        """Return all issue types. <name, id>"""
        logging.info(u'Get JIRA Issue Types')
        return self.jira_session.issue_types()

    def get_issue_statuses(self):
        """Return all issue statuses. <name, id>"""
        logging.info(u'Get JIRA Issue Statuses')
        return self.jira_session.statuses()

    def search_issues(self, jql):
        """Run a JQL search; at most 128 issues, returned as JSON."""
        logging.info(u'JIRA Search: %s' % jql)
        return self.jira_session.search_issues(jql_str=jql,
                                               maxResults=128,
                                               json_result=True)
def get_jira_dict(ticket_ids, config):
    '''
    takes in set of ticket_ids and returns dictionary with ticket_ids as keys
    and API data as values
    '''
    tickets = {}
    # initialize jira variable and config options
    credentials = (config['jira_username'], config['jira_password'])
    conn_options = {"server": config['jira_url'], "verify": config['certificate']}
    connection = None
    for ticket_id in ticket_ids:
        # 0 is the YAML placeholder meaning "no tickets recorded"; it gets a
        # fixed entry and no API call.
        if ticket_id == 0:
            tickets[0] = {
                'ticket_name': 'No ticket on file',
                'ticket_url': None
            }
            continue
        try:
            # Connect lazily: on the first real ticket, or again after a
            # previously failed call reset the connection.
            if connection is None:
                connection = JIRA(auth=credentials, options=conn_options)
            fetched = connection.issue(ticket_id)
            status_tag = ('[' + str(fetched.fields.status) + ']').upper()
            ticket_name = ' '.join([status_tag, fetched.fields.summary])
        except Exception as e:
            print("Jira API Call Error: ", e)
            ticket_name = ticket_id
            connection = None
        finally:
            tickets[ticket_id] = {
                'ticket_name': ticket_name,
                'ticket_url': config['jira_url'] + "/browse/" + str(ticket_id)
            }
    # close Jira connection if open
    if connection is not None:
        connection.close()
    return tickets
def get_versions(bot):
    """Cache Minecraft version data from the Mojira "MC" project on *bot*.

    Sets mc_versions (set of names), latest/previous version names and their
    archived/released flags.
    """
    tracker = JIRA(
        server="https://bugs.mojang.com",
        basic_auth=(mojira_username, mojira_password),
    )
    versions = tracker.project("MC").versions
    latest, previous = versions[-1], versions[-2]
    bot.mc_versions = {str(v) for v in versions}
    bot.latest_version = str(latest)
    bot.previous_version = str(previous)
    bot.latest_version_archive_status = latest.archived
    bot.previous_version_archive_status = previous.archived
    bot.latest_version_release_status = latest.released
    tracker.close()
def create_issue_eddid(log_info, title):
    """Create a Jira bug documenting an old/new trading-API comparison mismatch.

    :param log_info: (account, password) tuple used for basic auth
    :param title: appended to the fixed Chinese summary prefix
    """
    options = {'server': 'http://172.16.10.243:8080/'}
    jira = JIRA(options, basic_auth=log_info)
    # Summary prefix (Chinese): "[old/new version comparison][order placement]"
    summary = "[新老版本对比][委托下单]{}".format(title)
    # Issue body (Chinese section headers): request parameters, old API
    # response, new API response, expected result.
    # NOTE(review): the 'error_info_ansi' values contain mojibake copied
    # verbatim from the captured responses — do not "fix" them.
    description = '''
[请求参数]
{
    "MF": 301,
    "thread_id": {{thread_id}},
    "fund_account": "",
    "exchange_type": "K",
    "stock_account": "3001331112",
    "stock_code": "00008",
    "entrust_amount": 10000,
    "entrust_price": 0.047,
    "entrust_bs": "B",
    "entrust_prop": "e",
    "op_station": "isX5db4FstXj2MdW3weZGcpAktjHG4nM",
    "session_no": {{session_no}}
}
[老接口响应数据]
{'error_no': -9999, 'error_info': '用户编号或密码无效', 'error_info_ansi': '�û���Ż�������Ч'}
[新接口响应数据]
{'error_no': -1, 'error_info': '由于长时间未操作或其他原因,为确保您的交易安全,请重新登录.', 'error_info_ansi': '���ڳ�ʱ��δ����������ԭ��Ϊȷ�����Ľ��װ�ȫ�������µ�¼.'}
[期望结果]
新老接口, 响应数据一致
'''
    issue_dict = {
        'project': {'key': 'AYER'},           # project
        'issuetype': {'id': '10004'},         # issue type
        'priority': {'id': '3'},              # priority
        'summary': summary,                   # issue title
        'assignee': {'name': 'wuchaozhen'},   # assignee
        'description': description,           # issue description
        'customfield_10203': {'id': '10112'},
    }
    rsp = jira.create_issue(issue_dict)
    # Re-fetch the created issue and echo its key/summary/status.
    issue = jira.issue(rsp)
    print(issue.key, issue.fields.summary, issue.fields.status)
    jira.close()
class jiraclient(object):
    """Thin JIRA wrapper configured from ``oc.od.settings.jira``.

    ``self.jira`` stays None when configuration is incomplete or the
    connection attempt fails; all operations then degrade gracefully.
    """

    def __init__(self):
        settings = oc.od.settings.jira
        url = settings.get('url')
        username = settings.get('username')
        apikey = settings.get('apikey')
        self.project_id = settings.get('project_id')
        self.jira = None
        # Only attempt a connection when every required setting is present.
        if all([url, self.project_id, username, apikey]):
            try:
                self.jira = JIRA(url, basic_auth=(username, apikey))
            except Exception as e:
                logger.error('Init jira failed %s', e)

    def __del__(self):
        # Release the connection when the client is garbage-collected.
        if self.jira:
            self.jira.close()

    def isenable(self):
        """Return True when a JIRA connection was established."""
        return True if self.jira else False

    def issue(self, summary, description, issuetype):
        """Create an issue in the configured project.

        Returns a dict with project/id/key on success, otherwise None.
        """
        if self.jira is None:
            return None
        # make sure that issuetype dict has a name entry
        if issuetype.get('name') is None:
            issuetype['name'] = 'Bug'
        created = self.jira.create_issue(project=self.project_id,
                                         summary=summary,
                                         description=description,
                                         issuetype=issuetype)
        if hasattr(created, 'id') and hasattr(created, 'key'):
            return {
                'project': self.project_id,
                'id': created.id,
                'key': created.key
            }
        return None
def version_update_reporter(bot):
    """Detect new/archived/released versions on the Mojira "MC" project.

    :param bot: bot object holding the previously cached version state
    :return: (embed, content) describing the first detected change, or None
             when nothing changed

    Fixed: the JIRA connection is now closed via try/finally, so it is no
    longer leaked when a helper raises; the four duplicated close() calls
    are gone.
    """
    jira_access = JIRA(
        server="https://bugs.mojang.com",
        basic_auth=(mojira_username, mojira_password),
    )
    try:
        # All the different minecraft versions on the bug tracker
        versions = jira_access.project("MC").versions
        embed = discord.Embed(color=discord.Colour.magenta())
        # We check for the different kinds of version changes.
        # We can just return after the first match since updates between the
        # different version states will never be faster than the loop period.
        new = new_version(bot, versions)
        if new:
            embed.title = str(new)
            embed.add_field(name="Affected",
                            value=str(affected_bugs(str(new), jira_access)))
            embed.add_field(name="Fixed",
                            value=str(fixed_bugs(str(new), jira_access)))
            content = "Version **{}** has just been created!".format(str(new))
            return embed, content
        archived = archived_version(bot, versions)
        if archived:
            embed.title = str(archived)
            embed.add_field(name="Affected",
                            value=str(affected_bugs(str(archived), jira_access)))
            embed.add_field(name="Fixed",
                            value=str(fixed_bugs(str(archived), jira_access)))
            embed.add_field(name="Released", value=str(archived.released))
            content = "Version **{}** has just been archived!".format(str(archived))
            return embed, content
        released = released_version(bot, versions)
        if released:
            embed.title = str(released)
            embed.add_field(name="Affected",
                            value=str(affected_bugs(str(released), jira_access)))
            embed.add_field(name="Fixed",
                            value=str(fixed_bugs(str(released), jira_access)))
            embed.add_field(name="Released", value=str(released.released))
            content = "Version **{}** has just been released!".format(str(released))
            return embed, content
        return None
    finally:
        jira_access.close()
def create_issue(log_info):
    """Create a Jira bug with a planned fix window of three working days.

    (Docstring translated from Chinese.)
    :param log_info: (account, password) tuple for basic auth
    Other values (project SQEAP, type Bug, priority P3, versions, component,
    assignee, how-found) are hard-coded in the issue dict below; an
    attachment 'pytest_learn.zip' is uploaded after creation.
    """
    options = {'server': 'https://jira.xxx.com/'}
    jira = JIRA(options, basic_auth=log_info)
    today = datetime.datetime.now().date()
    # Walk the calendar forward, counting only Mon-Fri, until three working
    # days have been accumulated. Scanning starts the day after tomorrow,
    # matching the original loop exactly.
    workdays_left = 3
    planned_end = today + datetime.timedelta(days=1)
    while workdays_left > 0:
        planned_end += datetime.timedelta(days=1)
        if planned_end.weekday() < 5:  # Monday..Friday
            workdays_left -= 1
    issue_dict = {
        'project': {'key': 'SQEAP'},              # project
        'issuetype': {'name': 'Bug'},             # issue type
        'priority': {'name': 'P3 - Medium'},      # priority
        'summary': '自动化创建jira测试',            # issue title
        'versions': [{'name': '123'}],            # affected version
        'customfield_10903': {'value': 'Automation'},  # how found
        'components': [{"name": "aaa"}],          # component
        'assignee': {'name': 'xxx'},              # assignee
        'description': '自动化创建jira测试',        # issue description
        'customfield_10300': str(today),          # planned start date
        'customfield_10405': str(planned_end),    # planned end date
    }
    created = jira.create_issue(issue_dict)
    issue = jira.issue(created)
    jira.add_attachment(issue=issue, attachment='pytest_learn.zip')
    jira.close()
async def vote(bug):
    """Vote on a Mojira bug and return an embed with the new vote count.

    :param bug: Mojira issue key, e.g. ``MC-4``
    :return: a ``discord.Embed`` built from :func:`limited_bug`
    """
    jira_access = JIRA(
        server='https://bugs.mojang.com',
        basic_auth=(mojira_username, mojira_password),
    )
    try:
        try:
            # fixed: JIRA.add_vote is synchronous — the original awaited its
            # return value, raising a TypeError on every call (silently
            # swallowed and printed by the except below).
            jira_access.add_vote(bug)
        except Exception as e:
            print(e)
        issue = jira_access.issue(bug)
        votes = issue.fields.votes
        embed = limited_bug(bug)
        embed.color = discord.Colour.teal()
        embed.description = f'Issue {issue} has been voted on.\n' \
                            f'The issue now has a total of {votes} votes.'
        return embed
    finally:
        # close() is now guaranteed even when the lookup raises
        jira_access.close()
def get_jira_dict(ticket_ids):
    '''
    takes in set of ticket_ids and returns dictionary with ticket_ids as keys
    and API data as values
    '''
    tickets = {}
    # Connect once up front using the module-level config.
    credentials = (config['jira_username'], config['jira_password'])
    conn_options = {"server": config['jira_url'], "verify": config['certificate']}
    jira = JIRA(auth=credentials, options=conn_options)
    for ticket_id in ticket_ids:
        # 0 is the YAML placeholder meaning "no tickets recorded"; it gets a
        # fixed entry and no API call.
        if ticket_id == 0:
            tickets[0] = {
                'ticket_name': 'No ticket on file',
                'ticket_url': None
            }
            continue
        try:
            ticket_name = jira.issue(ticket_id).fields.summary
        except Exception as e:
            print("Jira API Call Error: ", e)
            ticket_name = ticket_id
        finally:
            tickets[ticket_id] = {
                'ticket_name': ticket_name,
                'ticket_url': config['jira_url'] + "/browse/" + str(ticket_id)
            }
    jira.close()
    return tickets
def load_worklog(issue_key):
    """Replace the stored worklogs for *issue_key* with the current ones
    fetched from Jira. Errors are logged, never raised.

    :param issue_key: Jira issue key whose worklogs are refreshed
    """
    jira = None  # bound up front so the finally clause is safe if setup fails
    try:
        # Assign Jira configuration
        j_config = configparser.ConfigParser()
        j_config.read('config.ini')
        username = j_config['jira_config']['username']
        secret_token = j_config['jira_config']['secret_token']
        server = j_config['jira_config']['server']
        # Instantiate Jira connection
        jira = JIRA(server, basic_auth=(username, secret_token))
        worklogs = jira.worklogs(issue_key)
        # Delete-then-insert keeps the stored worklogs in sync with Jira.
        jira_model.delete_worklog(issue_key)
        for w in worklogs:
            time_spent = w.timeSpentSeconds
            author = w.author.name
            created = w.created
            started = w.started
            # The comment field is optional on a worklog.
            if hasattr(w, "comment"):
                comment = w.comment
            else:
                comment = None
            worklog_id = w.id  # renamed from `id` — no builtin shadowing
            logger.info("{}: {} seconds tracked on worklog #{}".format(
                issue_key, time_spent, worklog_id))
            jira_model.insert_worklog(worklog_id, issue_key, comment, created,
                                      started, author, time_spent)
    except Exception:
        # narrowed from a bare except so SystemExit/KeyboardInterrupt propagate
        logger.exception("Message")
    finally:
        # fixed: previously raised NameError here when config reading or the
        # JIRA() call itself failed before `jira` was bound
        if jira is not None:
            jira.close()
    return
def main():
    """E-mail a digest of all open "Backup & Restore" tasks in the project."""
    jira = JIRA(**config.JIRA)
    jql = (f'project = {JIRA_PROJECT} AND type = "Backup & Restore" AND '
           f'status NOT IN (Closed, Rejected, Resolved)')
    # Wrap every matching issue together with its comments.
    opened_issues = [Issue(found, jira.comments(found))
                     for found in jira.search_issues(jql, maxResults=False)]
    if not opened_issues:
        logger.info('nothing is found')
    else:
        subject = f'JIRA ({JIRA_PROJECT}) | Active tasks'
        body = config.SYSINFR_TEMPLATE.render(project=JIRA_PROJECT,
                                              issues=opened_issues,
                                              wiki=config.SETTINGS['wiki'])
        email.notify(subject=subject, message=body)
        logger.info(f'{len(opened_issues)} tasks are found')
    jira.close()
def main():
    """Group recent open JobSummary issues by assignee and e-mail each person
    a digest of their backup-monitoring tasks."""
    jira = JIRA(**config.JIRA)
    jql = (
        f'project={JIRA_PROJECT} AND summary ~ JobSummary AND status = Open '
        f'AND created > startOfDay(-{LOOKUP_DAYS}) AND created < now() '
        f'ORDER BY key DESC')
    opened_issues = defaultdict(list)
    known_services = (config.SETTINGS['sox_services'] +
                      config.SETTINGS['admin_services'])
    for issue in jira.search_issues(jql, maxResults=False):
        # Map the issue onto a known service via its summary text.
        matched_service = None
        for service_name in known_services:
            if service_name in issue.fields.summary:
                matched_service = service_name
                break
        if matched_service is None:
            logger.error(f'there is unknown service ({issue.fields.summary})')
            continue
        assignee = issue.fields.assignee.name
        email_domain = config.SETTINGS['smtp']['domain']
        email_address = f'{assignee}@{email_domain}'
        # Normalise the summary to the bare service name for the template.
        issue.fields.summary = matched_service
        opened_issues[email_address].append(Issue(issue, jira.comments(issue)))
    for email_address, issues in opened_issues.items():
        subject = f'{issues[0].summary} | Backup monitoring'
        body = config.SOX_TEMPLATE.render(project=JIRA_PROJECT,
                                          issues=issues,
                                          wiki=config.SETTINGS['wiki'])
        recipients = config.SMTP_PARAMS['to'] + [email_address]
        email.notify(subject=subject, to=recipients, message=body)
    logger.info(f'{len(opened_issues)} tasks are found')
    if not opened_issues:
        logger.info('nothing is found')
    jira.close()
async def fixes_handler(bot):
    """Announce newly resolved Mojira bugs in the fixed-bug channel."""
    jira_access = JIRA(
        server='https://bugs.mojang.com',
        basic_auth=(mojira_username, mojira_password),
    )
    # First we get the list of newly resolved bugs that we're interested in
    fixes_list, wont_fix_list, jira_access = new_fixes(jira_access)
    bug_fix_channel = bot.get_channel(fixed_bug_channel_id)
    # For each resolution bucket: skip duplicates, then build and post an
    # embed per bug.
    for resolution, bugs in (('Fixed', fixes_list),
                             ("Won't Fix", wont_fix_list)):
        for bug in bugs or ():
            if await duplicate_checker(bot, bug):
                continue
            embed, status = fixed_bug_embed(jira_access, bug, status=resolution)
            await bug_fix_channel.send(content=status, embed=embed)
    jira_access.close()
print('NOT FOUND: {} ({}), {}'.format( user_name, user_displayname, user_email)) else: pprint.pprint(person) # display error else: if 'userPrincipalName' in person: user_principal_name = person['userPrincipalName'] or '' if user_principal_name != '': # pprint.pprint(person) job_title = person['jobTitle'] or 'None' office_location = person['officeLocation'] or 'None' if args.verbose: print('FOUND: ' + person['userPrincipalName'] + ', ' + job_title + ', ' + office_location) jira_client.close() c = httplib2.Http() c.add_credentials(args.username, args.password) now = datetime.datetime.now(tzutc()) for not_found in not_found_list: user_name = not_found['user_name'] user_displayname = not_found['user_displayname'] user_email = not_found['user_email'] latest_date = now user_request = args.server + '/activity?maxResults=1&os_authType=basic&streams=user+IS+' + user_name response, content = c.request(user_request, 'GET') root = ET.fromstring(content)
def main():
    """Summarise Commvault backup jobs per SOX service into today's
    "JobSummary_[<service>]" Jira issue, commenting on each problem job and
    closing the issue when every problem has a known explanation.
    """
    jira = JIRA(**config.JIRA)
    commvault = Commcell(**config.COMMVAULT)
    job_controller = JobController(commvault)
    for service_name in config.SETTINGS['sox_services']:
        # Each service maps to a Commvault client group.
        client_group = ClientGroup(commvault, service_name)
        clients = client_group.associated_clients
        issues = []
        for client_name in clients:
            jobs = job_controller.all_jobs(
                client_name=client_name,
                job_summary='full',
                limit=config.SETTINGS['commvault']['jobs_limit'],
                lookup_time=config.SETTINGS['commvault']['lookup_time'],
            )
            for job_id in jobs:
                job = jobs[job_id]
                job_status = job['status'].lower()
                job_failed_files = job['totalFailedFiles']
                job_failed_folders = job['totalFailedFolders']
                # Fully clean completions are not reported. Virtual Server
                # jobs are skipped here even with failed counts; their
                # error case is handled by a dedicated branch below.
                if (job_status == 'completed' and
                        (not (job_failed_files or job_failed_folders) or
                         job['appTypeName'] == 'Virtual Server')):
                    continue
                # One problem record per job; 'reason' is the diagnosis and
                # 'comment' the ready-made Jira comment (when known).
                issue = {
                    'job_id': job_id,
                    'client': client_name,
                    'status': job_status,
                    'percent': job['percentComplete'],
                    'reason': '',
                    'comment': '',
                }
                logger.info(f'client={issue["client"]} '
                            f'job_id={issue["job_id"]} '
                            f'status={issue["status"]} '
                            f'failed_files={job_failed_files} '
                            f'failed_folders={job_failed_folders}')
                if job_status in ['running', 'waiting']:
                    message = f'Progress: {job["percentComplete"]}%'
                    issue['comment'] = make_comment(issue, message)
                elif job_status in [
                        'pending', 'failed', 'killed', 'suspended',
                        'failed to start'
                ]:
                    issue['reason'] = job['pendingReason']
                    # Collapse the verbose per-subclient message into one
                    # canonical reason string.
                    pattern = 'backup activity for subclient .+ is disabled'
                    if re.match(pattern, issue['reason'], flags=re.IGNORECASE):
                        issue['reason'] = ('Backup activity for subclient '
                                           'is disabled')
                elif (job_status == 'completed' and
                        (job_failed_files or job_failed_folders)):
                    issue['reason'] = (f'Failed to back up: '
                                       f'{job_failed_folders} Folders, '
                                       f'{job_failed_files} Files')
                elif (job['appTypeName'] == 'Virtual Server' and
                        job_status == 'completed w/ one or more errors'):
                    # For VM backups, drill into the per-VM status to find
                    # the real failure reason.
                    issue['reason'] = job_status
                    job_detail = job_controller.get(
                        job_id).details['jobDetail']
                    vms = job_detail['clientStatusInfo']['vmStatus']
                    # After restoring VM with new name, Commvault renames old
                    # client name. For example, src: srv-tibload-001,
                    # dest: srv-tibload-001_20102020
                    client_vm_name = client_name.split('_')[0]
                    vm_found = False
                    for vm in vms:
                        if vm['vmName'].startswith(client_vm_name):
                            vm_found = True
                            issue['reason'] = vm['FailureReason']
                            break
                    if not vm_found:
                        logger.error(f'{client_vm_name} is not found '
                                     f'in the job ({job_id})')
                elif job_status == 'completed w/ one or more errors':
                    issue['reason'] = job['pendingReason']
                elif job_status == 'committed':
                    issue['reason'] = ('Job was cancelled, but '
                                       'some items successfully backed up')
                else:
                    logger.error(f'undefined job: {job}')
                # Attach a wiki link when the reason matches a known error.
                if issue['reason'] and not issue['comment']:
                    for error in config.SETTINGS['known_errors']:
                        if error.lower() in issue['reason'].lower():
                            link = config.SETTINGS['wiki'] + '/display/IDG/'
                            link += '+'.join(error.split())
                            message = f'[{error}|{link}]'
                            issue['comment'] = make_comment(issue, message)
                            break
                issues.append(issue)
        # Aggregate all problem records into one Jira comment; the issue may
        # only be auto-closed when every record has a prepared comment.
        comment = ''
        issue_can_be_closed = True
        for issue in issues:
            if not issue['comment']:
                issue_can_be_closed = False
                reason = make_comment(issue, issue['reason'])
                comment += f'{reason}\n'
            else:
                comment += f'{issue["comment"]}\n'
        if not comment:
            comment = 'No problem was found'
        jql = (f'project = SOX AND '
               f'summary ~ "JobSummary_\\\\[{service_name}\\\\]" AND '
               f'created >= startOfDay()')
        issue = jira.search_issues(jql, validate_query=True)[0]
        issue_status = issue.fields.status.name.lower()
        if issue_status == 'open':
            jira.add_comment(issue.key, comment)
            comment = comment.replace('\n', '|')
            if issue_can_be_closed:
                # list of transitions /rest/api/2/issue/${issueIdOrKey}/transitions
                jira.transition_issue(issue=issue.key, transition='Close')
                logger.info(f'{service_name} ({issue.key}) has been closed')
        else:
            logger.info(
                f'{service_name} ({issue.key}) has already been closed')
    jira.close()
    commvault.logout()
def sprint_report(board_id, sprint_id):
    """Print the estimate sums of incompleted and completed issues for a
    sprint, using the module-level ``jira`` connection.
    """
    # Calculate Incompleted Issues
    sum_incompl = jira.incompletedIssuesEstimateSum(board_id, sprint_id)
    # Completed
    sum_compl = jira.completedIssuesEstimateSum(board_id, sprint_id)
    # Print all — fixed: the originals were Python 2 print statements,
    # a SyntaxError under Python 3 (which the rest of this file targets).
    print("Sum Incompleted Issues: ", sum_incompl)
    print("Sum Completed Issues ", sum_compl)


if __name__ == '__main__':
    jira = JIRA(options, basic_auth=(jira_user, jira_apikey))
    # collect_all_history(issues_in_proj)
    # print("#####-----------------#####")
    # print(jira.sprint_info(0, get_sprint_id(jira, "CCP Dev Sprint 40", dev_sprint_board_id)))
    print("#####-----------------#####")
    all_sprints = get_all_sprints()
    print(all_sprints)
    print("#####-----------------#####")
    print(all_sprints[-1]['start_date'])
    # sprint_report(dev_sprint_board_id, get_sprint_id(jira, "CCP Dev Sprint 40", dev_sprint_board_id))
    jira.close()
    print("--- Execution time: %s seconds ---" % (time.time() - start_time))
class JiraClient(object):
    """Client class for working with Jira.

    (Docstrings translated from Russian; runtime strings left untouched.)
    """

    def __init__(self, host, login, password):
        self.host = host
        self.basic_auth = (login, password)
        self.jira = JIRA(server=self.host, basic_auth=self.basic_auth)
        # Issues helper bound to this connection.
        self.issues = Issues(connection=self.jira)

    def search_issues(self, jql):
        """Search Jira for issues matching the given JQL query."""
        return self.issues.search_issues(jql=self.decode_query(query=jql))

    def worklogs_to_excel(self, filename, jql, startrow=0, startcol=0):
        """Write the collected worklogs to an Excel file.

        Args:
            filename: Name of the report file.
            jql: Query text whose results the worklogs are collected for.
            startrow: Top row index for the table.
            startcol: Left column index for the table.
        """
        worklogs_table = WorklogsTable(jira_client=self)
        for issues_list in self.issues.all_issues.values():
            for issue in issues_list:
                worklogs_table.insert_data_for_issue_into_table(issue=issue)
        worklogs_table.insert_jql_into_table(jql=self.decode_query(query=jql))
        # Per-author totals sheet.
        worklogs_by_author_table = WorklogsByAuthorTable(jira_client=self)
        for info in self.issues.worklogs_by_author.by_author.values():
            worklogs_by_author_table.insert_data_for_author_into_table(
                by_author=info)
        # Per-issue breakdown for each author.
        worklogs_by_issue_for_author_table = WorklogsByIssueForAuthorTable(
            jira_client=self)
        for by_author in self.issues.worklogs_by_author.by_author.values():
            for by_issue in by_author.worklogs_by_issue.values():
                worklogs_by_issue_for_author_table.insert_data_for_issue_into_table(
                    issue=by_issue)
        # Reports are written to <package root>/reports/.
        directory = str(Path(
            __file__).parent.parent.absolute()) + os.sep + 'reports' + os.sep
        worklogs_table.to_excel(directory=directory, filename=filename,
                                startrow=startrow, startcol=startcol,
                                sheet_name='Worklogs')
        worklogs_by_issue_for_author_table.to_excel(
            directory=directory, filename=filename, startrow=startrow,
            startcol=startcol, sheet_name='WorklogsByIssue')
        worklogs_by_author_table.to_excel(directory=directory,
                                          filename=filename,
                                          startrow=startrow,
                                          startcol=startcol,
                                          sheet_name='WorklogsByAuthor')

    @staticmethod
    def sec_to_hours_mins(s):
        # Convert seconds to hours with a decimal-minutes fraction; whole
        # hours are returned as a plain int.
        hours = int(s // 3600)
        mins = (s % 3600) // 60
        if mins == 0:
            return hours
        else:
            # str(mins / 60)[2:] strips the leading "0." of the fraction.
            # NOTE(review): this can lose precision for some values — verify
            # the intended rounding behaviour.
            mins = str(mins / 60)[2:]
            return float("{}.{}".format(hours, mins))

    @staticmethod
    def sec_to_mins(s):
        # NOTE(review): assumes str(s // 60) ends in two droppable characters
        # (e.g. a float's ".0"); confirm the expected input type.
        return int(str(s // 60)[:-2])

    def decode_query(self, query):
        # On Windows the console encoding may mangle the query text; try a
        # few codec round-trips and return the first that decodes cleanly.
        if platform.system() == 'Windows':
            for encode, decode in [['cp866', 'utf-8'],
                                   ['cp1251', 'utf-8'],
                                   ['cp866', 'cp1251']]:
                decoded_query = self.try_decode(string=query, encode=encode,
                                                decode=decode)
                if decoded_query is not None:
                    return decoded_query
        return query

    @staticmethod
    def try_decode(string, encode, decode,
                   exceptions=(UnicodeEncodeError, UnicodeDecodeError)):
        # Round-trip the string through the given codecs; None on failure.
        try:
            return string.encode(encode).decode(decode)
        except exceptions:
            return None

    def close_connection(self):
        # The printed message is user-facing Russian: "Closing Jira session".
        print('Закрытие сессии Jira')
        self.jira.close()
class JiraSession(object):
    """Context-managed wrapper around a JIRA server connection.

    Use as ``with JiraSession(server, account, password) as s: ...``;
    the underlying connection is closed on exit.
    """

    def __init__(self, server, account, password, verify=True):
        """
        Init Jira Session
        :param server: JIRA server URL
        :param account: login account name
        :param password: account password
        :param verify: whether to verify TLS certificates (default True)
        """
        self.__server = server
        self.__account = account
        self.__password = password
        self.__jira_opts = {
            'server': self.__server,
            'verify': verify,
        }
        self.__session = JIRA(self.__jira_opts,
                              basic_auth=(self.__account, self.__password))

    def __enter__(self):
        # NOTE(review): `assert` is stripped under `python -O`; kept as-is for
        # backward compatibility (callers may rely on AssertionError here).
        assert self.__session.current_user() == self.__account
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.__session.close()

    def get_user(self):
        """
        Get jira user
        :return: current account name
        """
        logging.info(u'Get JIRA Current User')
        return self.__session.current_user()

    def search_issues(self, jql):
        """
        Search issues via JQL
        :param jql: JQL query string
        :return: raw JSON search result (dict)
        """
        # Lazy %-args: the message is only formatted if the record is emitted.
        logging.info(u'JIRA Search: %s', jql)
        return self.__session.search_issues(jql_str=jql,
                                            maxResults=128,
                                            json_result=True)

    def get_projects(self):
        """
        Get jira projects
        :return: <key, name, id>
        """
        logging.info(u'Get JIRA Projects')
        return self.__session.projects()

    def get_sprints(self):
        """
        Get jira sprints
        :return: <name, id>
        """
        logging.info(u'Get JIRA Sprints')
        jira_sprints = list()
        for board in self.__session.boards():
            # extend() instead of repeated list concatenation: O(total)
            # instead of quadratic re-copying per board.
            jira_sprints.extend(self.__session.sprints(board.id))
        return jira_sprints

    def get_issue_fields(self):
        """
        Get jira fields
        :return: [{'name':'','id':''}]
        """
        logging.info(u'Get JIRA Fields')
        # Comprehension replaces the manual append loop.
        return [{'name': _field['name'], 'id': _field['id']}
                for _field in self.__session.fields()]

    def get_issue_types(self):
        """
        Get jira issue types
        :return: <name, id>
        """
        logging.info(u'Get JIRA Issue Types')
        return self.__session.issue_types()

    def get_issue_statuses(self):
        """
        Get issue statuses
        :return: <name, id>
        """
        logging.info(u'Get JIRA Issue Statuses')
        return self.__session.statuses()

    def get_project_versions(self, pid: str):
        """
        Get project versions
        :param pid: project id or key
        :return: [<name, id>]
        """
        logging.info(u'Get JIRA Project %s Versions', pid)
        return self.__session.project_versions(project=pid)
class Manager(object):
    """Issue manager.

    Files, updates and resolves JIRA tickets from Alertmanager generic
    webhook payloads (versions 3 and 4).
    """

    JINJA_ENV = jinja2.Environment(loader=jinja2.FileSystemLoader(ROOT_DIR))
    SUMMARY_TMPL = JINJA_ENV.get_template("templates/summary.template")
    DESCRIPTION_TMPL = JINJA_ENV.get_template("templates/description.template")
    DESCRIPTION_BOUNDARY = "_-- Alertmanager -- [only edit above]_"

    # Order for the search query is important for the query performance. It relies
    # on the 'alert_group_key' field in the description that must not be modified.
    SEARCH_QUERY = ('project = "{project}" and '
                    'issuetype = "{issuetype}" and '
                    'labels = "alert" and '
                    "status not in ({status}) and "
                    'labels = "jiralert:{group_label_key}"')

    logger = logging.getLogger(__name__)

    def __init__(
        self,
        basic_auth=None,
        server=None,
        resolve_transitions=(),
        resolved_status=(),
        threadpool=None,
    ):
        self.jira = None
        self.basic_auth = basic_auth
        self.server = server
        self.resolve_transitions = resolve_transitions
        self.resolved_status = resolved_status
        self.threadpool = threadpool
        # TODO: Keep an history of the last handled payloads and associated tickets.
        # (updated or created). Display that on the UI.
        self.history = collections.deque(20 * [None], 20)

    def connect(self):
        """Open the JIRA connection configured in __init__."""
        self.logger.info("Connecting to %s", self.server)
        self.jira = JIRA(basic_auth=self.basic_auth, server=self.server)
        self.logger.info("Connected to %s", self.server)

    def ready(self):
        """Return True once connect() has succeeded."""
        return bool(self.jira)

    def shutdown(self):
        """Close the JIRA connection and stop the thread pool, if any."""
        self.jira.close()
        self.jira = None
        if self.threadpool:
            self.threadpool.stop()
            self.threadpool = None

    def record(self, project, issue_type, request, response):
        """Remember the last handled (request, response) pair for the UI."""
        event = Event(project, issue_type, request, response)
        self.history.appendleft(event)

    def response(self, status, code, issues=None):
        """Build the (body, http_code) tuple returned by the endpoints."""
        return {"status": status, "issues": issues}, code

    @jira_errors_transitions.count_exceptions()
    @jira_request_time_transitions.time()
    def transitions(self, issue):
        return self.jira.transitions(issue)

    @jira_errors_close.count_exceptions()
    @jira_request_time_close.time()
    def close(self, issue, tid):
        return self.jira.transition_issue(issue, tid)

    @jira_errors_update.count_exceptions()
    @jira_request_time_update.time()
    def update_issue(self, issue, summary, description, tags):
        """Refresh summary/description/labels, preserving any user-edited
        text above DESCRIPTION_BOUNDARY."""
        custom_desc = issue.fields.description.rsplit(
            self.DESCRIPTION_BOUNDARY, 1)[0]
        # Merge expected tags and existing ones
        fields = {"labels": list(set(issue.fields.labels + tags))}
        return issue.update(
            summary=summary,
            fields=fields,
            description="%s\n\n%s\n%s" %
            (custom_desc.strip(), self.DESCRIPTION_BOUNDARY, description),
        )

    @jira_errors_create.count_exceptions()
    @jira_request_time_create.time()
    def create_issue(self, project, issue_type, summary, description, tags):
        return self.jira.create_issue({
            "project": {
                "key": project
            },
            "summary": summary,
            "description": "%s\n\n%s" % (self.DESCRIPTION_BOUNDARY, description),
            "issuetype": {
                "name": issue_type
            },
            "labels": tags,
        })

    @request_time_generic_issues.time()
    def post_issues(self, payload):
        """
        This endpoint accepts a JSON encoded notification according to the version 3 or 4 of
        the generic webhook of the Prometheus Alertmanager.

        Project and issue type are taken from the payload's commonLabels.
        """
        common_labels = payload["commonLabels"]
        if "issue_type" not in common_labels or "project" not in common_labels:
            self.logger.error(
                "/issue, required commonLabels not found: issue_type or project"
            )
            project = None
            issue_type = None
            resp = self.response(
                "Required commonLabels not found: issue_type or project", 400)
        else:
            issue_type = common_labels["issue_type"]
            project = common_labels["project"]
            resp = self.do_file_issue(project, issue_type, payload)
        self.record(project, issue_type, payload, resp)
        return resp

    @request_time_qualified_issues.time()
    def post_issues_with_project(self, project, issue_type, payload):
        """
        This endpoint accepts a JSON encoded notification according to the version 3 or 4 of
        the generic webhook of the Prometheus Alertmanager.

        Project and issue type come from the URL, not the payload.
        """
        if payload["version"] not in ["3", "4"]:
            self.logger.error("/issue, unknown message version: %s",
                              payload["version"])
            resp = self.response(
                "unknown message version %s" % payload["version"], 400)
        else:
            resp = self.do_file_issue(project, issue_type, payload)
        self.record(project, issue_type, payload, resp)
        return resp

    def update_or_resolve_issue(self, project, issue_type, issue, resolved,
                                summary, description, tags):
        """Update and maybe resolve an issue. Returns True if it was closed."""
        is_closed = False
        self.logger.debug("issue (%s, %s), jira issue found: %s",
                          project, issue_type, issue.key)

        # Try different possible transitions for resolved incidents
        # in order of preference. Different ones may work for different boards.
        if resolved:
            valid_trans = [
                t for t in self.transitions(issue)
                if t["name"].lower() in self.resolve_transitions
            ]
            if valid_trans:
                self.close(issue, valid_trans[0]["id"])
                is_closed = True
            else:
                self.logger.warning("Unable to find transition to close %s",
                                    issue)

        # Update the base information regardless of the transition.
        self.update_issue(issue, summary, description, tags)
        self.logger.info("issue (%s, %s), %s updated",
                         project, issue_type, issue.key)
        return is_closed

    def do_file_issue(self, project, issue_type, payload):
        """Validate the payload and dispatch to sync or async filing."""
        if not self.ready():
            return self.response("Not ready yet", 503)
        if payload["version"] not in ["3", "4"]:
            self.logger.error("issue (%s, %s), unknown message version: %s",
                              project, issue_type, payload["version"])
            return self.response(
                "unknown message version %s" % payload["version"], 400)

        if self.threadpool:
            # We want a separate thread pool here to avoid blocking incoming
            # requests.
            self.threadpool.callInThread(self.do_file_issue_async, project,
                                         issue_type, payload)
            return self.response("OK (async)", 201)
        else:
            issues = self.do_file_issue_sync(project, issue_type, payload)
            return self.response("OK", 200, issues)

    def do_file_issue_async(self, project, issue_type, data):
        try:
            issues = self.do_file_issue_sync(project, issue_type, data)
            resp = self.response("OK", 200, issues)
        except JIRAError as e:
            resp = self.response(str(e), 503)
        # Record a fake response for async requests.
        self.record(project, issue_type, data, resp)

    @staticmethod
    def prepare_data(data):
        """Return a deep copy of the payload with per-alert stable hashes."""
        # Patch data to make sure it has all we need.
        data = copy.deepcopy(data)
        if "alerts" not in data:
            data["alerts"] = []
        for alert in data["alerts"]:
            # Generate a short hash to make sorting more stable.
            simple_alert = copy.deepcopy(alert)
            # Remove things that change all the time.
            if "startsAt" in simple_alert:
                del simple_alert["startsAt"]
            if "endsAt" in simple_alert:
                del simple_alert["endsAt"]
            alert["hash"] = hashlib.sha1(
                json.dumps(simple_alert,
                           sort_keys=True).encode()).hexdigest()[:8]
        return data

    @errors.count_exceptions()
    def do_file_issue_sync(self, project, issue_type, data):
        """File/update/resolve issues for one payload.

        Returns a dict of permalinks: {"created": [...], "found": [...],
        "updated": [...], "resolved": [...]}.
        """
        issues = {"created": [], "found": [], "updated": [], "resolved": []}
        self.logger.info("issue: %s %s", project, issue_type)

        data = self.prepare_data(data)
        resolved = data["status"] == "resolved"
        tags = prepare_tags(data["commonLabels"])
        tags.append("jiralert:%s" % prepare_group_label_key(data["groupKey"]))

        description = self.DESCRIPTION_TMPL.render(data)
        summary = self.SUMMARY_TMPL.render(data)

        # If there's already a ticket for the incident, update it and close if necessary.
        query = self.SEARCH_QUERY.format(
            project=project,
            issuetype=issue_type,
            status=",".join(self.resolved_status),
            group_label_key=prepare_group_label_key(data["groupKey"]),
        )
        self.logger.debug(query)

        result = self.jira.search_issues(query) or []
        # Sort issues by key to have them in order of creation.
        # BUG FIX: sorted() returns a new list; the previous code discarded
        # its result, so issues were processed in search-result order.
        result = sorted(result, key=lambda i: i.key)
        issues["found"] = [issue.permalink() for issue in result]

        for issue in result:
            is_closed = self.update_or_resolve_issue(project, issue_type,
                                                     issue, resolved, summary,
                                                     description, tags)
            issues["resolved" if is_closed else "updated"].append(
                issue.permalink())

        if not result:
            # Do not create an issue for resolved incidents that were never filed.
            if not resolved:
                issue = self.create_issue(project, issue_type, summary,
                                          description, tags)
                issues["created"].append(issue.permalink())
                self.logger.info("issue (%s, %s), new issue created (%s)",
                                 project, issue_type, issue.key)

        return issues
def jiraConnector(start_Date, end_Date):
    """Pull worklogged tickets from Jira Cloud for [start_Date, end_Date],
    build the Delivery Report Excel workbook, push the summary into SQL
    Server, and return the figures/tables for the Dash dashboard.

    Args:
        start_Date: ISO date string ('YYYY-MM-DD'), inclusive range start.
        end_Date: ISO date string ('YYYY-MM-DD'), inclusive range end.

    Raises:
        PreventUpdate: when the requested range equals the cached one
            (Dash no-op), skipping the expensive refresh.
    """
    # Module-level caches shared with the Dash callbacks.
    global jiraData, workLog, workLogAlpha, option, filename, startDate, endDate
    if ((startDate == date.fromisoformat(start_Date)) and endDate == date.fromisoformat(end_Date)):
        raise PreventUpdate
    #else:
    #dayCount = days
    print("-----------------------------------------------------------------")
    initial = 0
    size = 100  # Jira Cloud page size cap per search request.
    startDate = date.fromisoformat(start_Date)
    #startDate = startDate.strftime('%Y-%m-%d')
    startDate = date.fromisoformat(start_Date)
    #endDate = endDate.strftime('%Y-%m-%d')
    endDate = date.fromisoformat(end_Date)
    jql = 'worklogDate >= ' + start_Date + ' AND worklogDate <= ' + end_Date
    print(jql)
    #Read credentials
    # NOTE(review): Credentials.txt lines are assumed to be
    # [0]=server URL, [1]=login, [2]=token — confirm file layout.
    credentialFile = open('Credentials.txt', 'r')
    line = credentialFile.read().splitlines()
    credentialFile.close()
    jira = JIRA(options={'server': line[0]}, basic_auth=(line[1], line[2])) #Connecting to Jira cloud
    #jql='worklogDate >= -'+str(days)+'d' #The JQL on which the whole data is retrieved
    data_jira = []
    work_log = []
    option.clear()
    #Retrieving tickets from jira cloud and appending to a list
    #Since the maximum count of ticket retrieved at a time in 100 we loop to get all the tickets or specific start and end count
    t0 = time.time()
    while True:
        start = initial * size #Initial start of ticket count
        #Fields to get
        jira_search = jira.search_issues(jql, startAt=start, maxResults=size, fields = "key, summary, issuetype, assignee, reporter, status, created, resolutiondate, workratio, timespent, timeoriginalestimate")
        if(len(jira_search) == 0):
            break
        #Geting all the specificed fields of the tickets in a list
        for issue in jira_search:
            timeSpent = 0
            # Defaults to "now" when the issue has no worklog inside the range.
            issue_logDateTime = datetime.today()
            #Retrieving the worklog only in the current range
            #This makes a significant impact on the programs execution time
            for w in jira.worklogs(issue.key):
                started = datetime.strptime(w.started[:-5], '%Y-%m-%dT%H:%M:%S.%f')
                if not (startDate <= started.date() <= endDate):
                    continue
                timeSpent = timeSpent + w.timeSpentSeconds
                # Keeps the start time of the LAST in-range worklog.
                issue_logDateTime = datetime.strptime(w.started[:-5], '%Y-%m-%dT%H:%M:%S.%f')
            issue_key = issue.key
            issue_summary = issue.fields.summary #Summary
            request_type = str(issue.fields.issuetype) #Issue type
            datetime_creation = issue.fields.created
            if datetime_creation is not None:
                datetime_creation = datetime.strptime(datetime_creation[:19], "%Y-%m-%dT%H:%M:%S") #Creation datetime
            datetime_resolution = issue.fields.resolutiondate
            if datetime_resolution is not None:
                datetime_resolution = datetime.strptime(datetime_resolution[:19], "%Y-%m-%dT%H:%M:%S") #End datetime
            #Reporter specification
            reporter_login = None
            reporter_name = None
            reporter = issue.raw['fields'].get('reporter', None)
            if reporter is not None:
                reporter_login = reporter.get('key', None)
                reporter_name = reporter.get('displayName', None)
            assignee_login = None
            assignee_name = None
            assignee = issue.raw['fields'].get('assignee', None)
            if assignee is not None:
                assignee_login = assignee.get('key', None)
                assignee_name = assignee.get('displayName', None)
            status = None
            st = issue.fields.status
            if st is not None:
                status = st.name
            #Work ratio and time logged
            issue_workratio = issue.fields.workratio
            issue_timespent = timeSpent
            issue_estimate = issue.fields.timeoriginalestimate
            data_jira.append((issue_key, issue_summary, request_type, datetime_creation, datetime_resolution, reporter_login, reporter_name, assignee_login, assignee_name, status, issue_estimate, issue_timespent, issue_logDateTime))
        initial = initial + 1 #Get the next 100 count
    #List to dataframe
    executionTime = time.time() - t0
    jiraData = pd.DataFrame(data_jira, columns=['Issue key', 'Summary', 'Request type', 'Datetime creation', 'Datetime resolution', 'Reporter login', 'Reporter name', 'Assignee login', 'Assignee name', 'Status', 'Estimate', 'Time spent', 'Logged Date Time'])
    del data_jira
    jira.close()
    #Preparing a dataframe (workLogAlpha) for downloading the summary report that shows the estimates and utilization of users in percentage
    workLog = jiraData.groupby(['Assignee name']).sum()
    # Seconds -> hours for the human-readable columns.
    jiraData[['Time spent [Hours]', 'Estimate [Hours]']] = jiraData[['Time spent', 'Estimate']].div(3600).round(2)
    jiraData = jiraData[['Reporter name', 'Assignee name', 'Time spent', 'Time spent [Hours]', 'Estimate', 'Estimate [Hours]', 'Summary', 'Issue key', 'Logged Date Time']]
    workLog[['Time spent', 'Estimate']] = workLog[['Time spent', 'Estimate']].div(3600)
    workLog.reset_index(inplace=True)
    # Normalize "first.last" Jira names to "First Last" for matching.
    workLog['Assignee name'] = workLog['Assignee name'].str.replace('.', ' ')
    workLog['Assignee name'] = workLog['Assignee name'].str.title()
    jiraData['Assignee name'] = jiraData['Assignee name'].str.replace('.', ' ')
    jiraData['Assignee name'] = jiraData['Assignee name'].str.title()
    #Leaves Data
    # NOTE(review): '.\Input\...' relies on \I and \A not being escape
    # sequences; raw strings or pathlib would be safer — confirm before changing.
    leaves = pd.read_excel('.\Input\Attendence.xlsx', na_values=['Present', 'Absent', 'Present, Time out is missing', 'Present, Request Pending', 'Holiday', 'Absent, Request Pending'])
    leaves = leaves[['Employee', 'Schedule Date IN', 'Remarks']]
    leaves.dropna(inplace=True)
    # Map attendance-sheet names onto the Jira-side display names.
    leaves = leaves.replace({'Employee': {'Muhammad Haris Shaikh': 'Haris Sheikh', 'Shaikh Muhammad Arsalan': 'Shaikh Arsalan', 'Rabail Muhammad Ali': 'Rabail Ali', 'Syed Muhammad Asaad Khurshid': 'Syed Asaad', 'Syeda Mahham Batool': 'Maham Batool'}})
    # One employee's rows are used as the reference for gazetted holidays.
    gazettedHoliday = leaves[leaves['Employee'] == 'Muhammad Bilal Khan']
    gazettedHoliday = gazettedHoliday[gazettedHoliday['Remarks'] == 'GazettedHoliday']['Schedule Date IN'].tolist()
    onLeaves = leaves[leaves['Remarks'] == 'On Leave']
    onLeaves.drop('Remarks', axis=1, inplace=True)
    leavesDict = {}
    for index, row in onLeaves.iterrows():
        leavesDict[row.Employee] = onLeaves[onLeaves['Employee'] == row.Employee]
    # 8 hours per leave day.
    groups = leaves.groupby('Employee').count().mul(8)
    groups.reset_index(inplace=True)
    groups.rename(columns={'Employee': 'Assignee name', 'Remarks': 'Leaves'}, inplace=True)
    #Using PMs list for names and departments
    defaultDf = pd.read_excel('.\Input\Employee List PM.xlsx')
    defaultDf.rename(columns = {'Name': 'Assignee name'}, inplace=True)
    workLog = fuzzy_merge(workLog, defaultDf, 'Assignee name', 'Assignee name', 80, 1)
    defaultDf = defaultDf.merge(workLog, on='Assignee name', how='left')
    groups = fuzzy_merge(groups, defaultDf, 'Assignee name', 'Assignee name', 90, 1)
    defaultDf = defaultDf.merge(groups, on='Assignee name', how='left')
    defaultDf.fillna(0, inplace = True)
    defaultDf['EndDate'] = endDate
    defaultDf['StartDate'] = startDate
    del workLog
    #Calculating Allocation and Utilization
    # 80 = working hours in the reporting fortnight, minus leave hours.
    defaultDf['Time spent'] = defaultDf['Time spent'].div(80-defaultDf.Leaves).round(4)
    defaultDf['Estimate'] = defaultDf['Estimate'].div(80-defaultDf.Leaves).round(4)
    defaultDf.replace({'Time spent': np.inf}, 0, inplace=True)
    defaultDf.replace({'Estimate': np.inf}, 0, inplace=True)
    defaultDf.rename(columns = {'Estimate':'Allocation', 'Time spent': 'Utilization'}, inplace=True)
    #Sanity Checks
    print("No leaves for: ", len(defaultDf[defaultDf['Leaves'] == 0.00]))
    print("No name matched for: ", len(defaultDf[defaultDf['matches_x'] == 0]))
    #Department wise segregation
    departments = {}
    departments['BA'] = defaultDf[['Assignee name', 'Allocation', 'Utilization']][defaultDf['Department'] == 'BA']
    departments['BI Engineer'] = defaultDf[['Assignee name', 'Allocation', 'Utilization']][defaultDf['Department'] == 'BI Engineer']
    departments['Data Engineer'] = defaultDf[['Assignee name', 'Allocation', 'Utilization']][defaultDf['Department'] == 'Data Engineer']
    departments['Dev Ops'] = defaultDf[['Assignee name', 'Allocation', 'Utilization']][defaultDf['Department'] == 'DevOps']
    departments['Project Manager'] = defaultDf[['Assignee name', 'Allocation', 'Utilization']][defaultDf['Department'] == 'PM']
    departments['SQA'] = defaultDf[['Assignee name', 'Allocation', 'Utilization']][defaultDf['Department'] == 'SQA']
    departments['Support'] = defaultDf[['Assignee name', 'Allocation', 'Utilization']][defaultDf['Department'] == 'Support']
    departments['Web Dev'] = defaultDf[['Assignee name', 'Allocation', 'Utilization']][defaultDf['Department'] == 'Web Dev']
    groups = defaultDf.groupby(['Department']).mean()
    groups.drop(['Leaves', 'Schedule Date IN'], axis=1, inplace=True)
    groups.reset_index(inplace=True)
    for team in departments:
        departments[team].replace({'Allocation': 0}, 'Estimates Missing', inplace=True)
        departments[team].replace({'Utilization': 0}, 'Estimates Missing', inplace=True)
    #Excel engine for report generation
    #Setting filename and dataframes
    filename = 'Delivery Report ' + str(datetime.strftime(startDate, "%d%b%Y")) + '.xlsx'
    df = pd.DataFrame()
    writer = pd.ExcelWriter('.\Output\{}'.format(filename), engine='xlsxwriter')
    df.to_excel(writer, index=False, sheet_name='Delivery Report')
    jiraData.to_excel(writer, index=False, sheet_name='Jira Data')
    workbook = writer.book
    worksheet = writer.sheets['Delivery Report']
    jiraSheet = writer.sheets['Jira Data']
    #Preparing formats
    number_rows = len(defaultDf)
    indent_fmt = workbook.add_format()
    indent_fmt.set_indent(1)
    total_fmt = workbook.add_format({'bg_color': '#d2fcf9', 'bold': True, 'border' : 1, 'num_format': '0.00%'})
    subheader_fmt = workbook.add_format({'bg_color' : '#7030a0', 'bold': True, 'border' : 1, 'font_color': 'white'})
    avg_fmt = workbook.add_format({'bg_color' : '#7030a0', 'bold': True, 'border' : 1, 'font_color': 'white', 'align': 'center'})
    merge_format_header = workbook.add_format({'align': 'center', 'font_size': 22, 'bg_color' : '#7030a0', 'bold': True, 'border' : 1, 'font_color': 'white'})
    hour_fmt = workbook.add_format({'bg_color': 'yellow'})
    merge_format_team = workbook.add_format({'bold': True, 'border' : 1})
    newline_fmt = workbook.add_format({'text_wrap': True})
    # Add a format. Light red fill with dark red text.
    badRed = workbook.add_format({'bg_color': '#ff0000', 'font_color': 'white', 'border': 1, 'num_format': '0.00%'})#FFC7CE
    # Add a format. Green fill with dark green text.
    goodGreen = workbook.add_format({'bg_color': '#92d050', 'font_color': 'black', 'border': 1, 'num_format': '0.00%'})#C6EFCE
    # Add a format. Yellow fill with dark yellow text.
    neutralYellow = workbook.add_format({'bg_color': '#ffff00', 'font_color': 'black', 'border': 1, 'num_format': '0.00%'})#ffeb9c
    # Add a format. Blue fill with white text.
    missingBlue = workbook.add_format({'bg_color': '#333f4f', 'font_color': 'white', 'border': 1})
    # Add a format. Grey fill with black text
    overGrey = workbook.add_format({'bg_color': '#aeaaaa', 'font_color': 'black', 'border': 1, 'num_format': '0.00%'})
    # General format
    general_fmt = workbook.add_format({'border': 1, 'num_format': '0.00%'})
    # General format border
    general_border_fmt = workbook.add_format({'border': 1})
    bottom_border_fmt = workbook.add_format()
    bottom_border_fmt.set_bottom(1)
    bottom_border_fmt.set_indent(1)
    # Static headers for the report layout.
    worksheet.write('A3', 'Resource', subheader_fmt)
    worksheet.write('B3', 'Allocation', subheader_fmt)
    worksheet.write('C3', 'Utilization', subheader_fmt)
    worksheet.write('E4', 'Team', subheader_fmt)
    worksheet.write('F4', 'Allocation', subheader_fmt)
    worksheet.write('G4', 'Utilization', subheader_fmt)
    worksheet.merge_range('E17:G17', 'No. of leaves b/w ' + str(datetime.strftime(startDate, "%b %d")) + ' - ' + str(datetime.strftime(endDate, "%b %d")), avg_fmt)
    worksheet.write('E18', 'Name', subheader_fmt)
    worksheet.merge_range('F18:G18', 'No. of Leaves', subheader_fmt)
    #Writing dataframes and formats
    worksheet.merge_range('A1:G1', 'Allocation & Utilization: ' + str(datetime.strftime(startDate, "%b %d")) + ' - ' + str(datetime.strftime(endDate, "%b %d, %Y")), merge_format_header)
    currentRow = 4
    # One block per team: team header row + the team's member rows.
    for team in departments:
        worksheet.merge_range('A{}:C{}'.format(currentRow, currentRow), team , merge_format_team)
        departments[team].to_excel(writer, index=False, header=False, sheet_name='Delivery Report', startrow=currentRow)
        allocation_range = "B{}:B{}".format(currentRow+1, len(departments[team])+currentRow)
        utilization_range = "C{}:C{}".format(currentRow+1, len(departments[team])+currentRow)
        team_range = "A{}:A{}".format(currentRow+1, len(departments[team])+currentRow)
        colorCells(worksheet, allocation_range, badRed, goodGreen, neutralYellow, overGrey, missingBlue)
        colorCells(worksheet, utilization_range, badRed, goodGreen, neutralYellow, overGrey, missingBlue)
        generalFormat(indent_fmt, team_range, worksheet)
        currentRow = currentRow + len(departments[team])+1
    generalFormat(bottom_border_fmt, "A{}:A{}".format(currentRow-1, currentRow-1), worksheet)
    generalFormat(hour_fmt, "D1:D1", jiraSheet)
    generalFormat(hour_fmt, "F1:F1", jiraSheet)
    worksheet.merge_range('E3:G3', 'Average', avg_fmt)
    allocation_total = defaultDf['Allocation'].mean()
    utilization_total = defaultDf['Utilization'].mean()
    allocation_range = "F5:F{}".format(len(groups)+4)
    utilization_range = "G5:G{}".format(len(groups)+4)
    team_range = "E5:E{}".format(len(groups)+4)
    generalFormat(general_fmt, utilization_range, worksheet)
    generalFormat(general_fmt, allocation_range, worksheet)
    generalFormat(merge_format_team, team_range, worksheet)
    worksheet.write('E{}'.format(len(groups)+5), 'Total', total_fmt)
    worksheet.write('F{}'.format(len(groups)+5), allocation_total, total_fmt)
    worksheet.write('G{}'.format(len(groups)+5), utilization_total, total_fmt)
    groups.to_excel(writer, index=False, header=False, sheet_name='Delivery Report', startrow=4, startcol=4)
    # Leaves section starts at a fixed row (matches the E17 header above).
    currentRow = 19
    if len(gazettedHoliday) > 0:
        combineString = ""
        for x in gazettedHoliday:
            combineString = combineString + x + "\n "
        worksheet.write("E{}".format(currentRow), "Gazetted Holiday", merge_format_team)
        # worksheet.write("F{}".format(currentRow), combineString, newline_fmt)
        worksheet.merge_range("F{}:G{}".format(currentRow, currentRow), len(gazettedHoliday), general_border_fmt)
        currentRow = currentRow + 1
    for employee in leavesDict:
        leaveDates = leavesDict[employee]['Schedule Date IN'].tolist()
        worksheet.write("E{}".format(currentRow), employee, merge_format_team)
        combineString = ""
        for x in leaveDates:
            combineString = combineString + x + "\n "
        # worksheet.write("F{}".format(currentRow), combineString, newline_fmt)
        worksheet.merge_range("F{}:G{}".format(currentRow, currentRow), len(leaveDates), general_border_fmt)
        currentRow = currentRow + 1
    currentRow = currentRow + 3
    # Legend block for the colour coding.
    worksheet.merge_range('E{}:F{}'.format(currentRow, currentRow), 'Allocation/Utilization - Legend', subheader_fmt)
    worksheet.write('G{}'.format(currentRow), 'Range(%)', subheader_fmt)
    currentRow = currentRow + 1
    worksheet.merge_range('E{}:F{}'.format(currentRow, currentRow), 'Under Allocated/Utilized', badRed)
    worksheet.write('G{}'.format(currentRow), '<50', general_border_fmt)
    currentRow = currentRow + 1
    worksheet.merge_range('E{}:F{}'.format(currentRow, currentRow), 'Average Allocated/Utilized', neutralYellow)
    worksheet.write('G{}'.format(currentRow), '50-80', general_border_fmt)
    currentRow = currentRow + 1
    worksheet.merge_range('E{}:F{}'.format(currentRow, currentRow), 'Balanced Allocated/Utilized', goodGreen)
    worksheet.write('G{}'.format(currentRow), '81-100', general_border_fmt)
    currentRow = currentRow + 1
    worksheet.merge_range('E{}:F{}'.format(currentRow, currentRow), 'Over Allocated/Utilized', overGrey)
    worksheet.write('G{}'.format(currentRow), '>100', general_border_fmt)
    currentRow = currentRow + 1
    worksheet.merge_range('E{}:F{}'.format(currentRow, currentRow), 'Estimates Missing/On Leaves', missingBlue)
    worksheet.write('G{}'.format(currentRow), '0', general_border_fmt)
    writer.save()
    #Log filename
    print(filename)
    #Python windows api to expand the sheet to adjust to the text
    cmd = "py Pywin32_Excel.py {}".format(filename)
    p = subprocess.Popen(cmd, shell=True)
    out, err = p.communicate()
    print("Returns from subprocess:", end=' ')
    print(err, end=' ')
    print(out)
    #Database schema
    #CREATE TABLE [dbo].[DeliveryReport](
    #    [ID] [int] IDENTITY(1,1) NOT NULL,
    #    [Team] [varchar](20) NOT NULL,
    #    [Name] [varchar](50) NOT NULL,
    #    [Allocation] [float] NULL,
    #    [Utilization] [float] NULL,
    #    [StartDate] [date] NULL,
    #    [EndDate] [date] NULL,
    #PRIMARY KEY CLUSTERED
    #(
    #    [ID] ASC
    #)WITH (PAD_INDEX = OFF, STATISTICS_NORECOMPUTE = OFF, IGNORE_DUP_KEY = OFF, ALLOW_ROW_LOCKS = ON, ALLOW_PAGE_LOCKS = ON, OPTIMIZE_FOR_SEQUENTIAL_KEY = OFF) ON [PRIMARY]
    #) ON [PRIMARY]
    #GO
    #Database entry for PowerBI dashboards
    defaultDf[['Allocation', 'Utilization']] = defaultDf[['Allocation', 'Utilization']].mul(100).round(2)
    server = 'US-BCKND'
    database = 'Costing'
    conn = pyodbc.connect("DRIVER={ODBC Driver 17 for SQL Server};SERVER="+server+';DATABASE='+database+';Trusted_Connection=yes;')
    cursor = conn.cursor()
    # Full refresh: the table holds only the latest reporting window.
    cursor.execute("TRUNCATE TABLE DeliveryTeamReportV2")
    for index, row in defaultDf.iterrows():
        cursor.execute("INSERT INTO DeliveryTeamReportV2 (Team, Name, Allocation, Utilization, StartDate, EndDate) values(?,?,?,?,?,?);", str(row.Department), str(row['Assignee name']), str(row.Allocation), str(row.Utilization), row.StartDate, row.EndDate)
    conn.commit()
    cursor.close()
    #Preparing multiple traces for default graphs
    # NOTE(review): workLogAlpha is a slice of defaultDf; the in-place edits
    # below may trigger pandas SettingWithCopyWarning — confirm intent.
    workLogAlpha = defaultDf[['Assignee name', 'Department', 'Leaves', 'Allocation', 'Utilization']]
    #workLogAlpha[['Allocation', 'Utilization']] = workLogAlpha[['Allocation', 'Utilization']].mul(100).round(2)
    workLogAlpha['Leaves'] = workLogAlpha['Leaves'].div(8)
    workLogAlpha.rename(columns = {'Assignee name': 'Assignee', 'Department': 'Team', 'Allocation': 'Allocation (%)', 'Utilization': 'Utilization (%)'}, inplace=True)
    workLogAlpha['Assignee'] = workLogAlpha['Assignee'].str.replace('Muhammad ', '') #just replacing too many similar sir names
    jiraData = jiraData[['Issue key', 'Summary', 'Assignee name', 'Estimate [Hours]', 'Time spent [Hours]', 'Logged Date Time']]
    jiraData.rename(columns = {'Estimate [Hours]': 'Estimate (hrs)', 'Time spent [Hours]': 'Time spent (hrs)'}, inplace = True)
    #Main overall bar graph
    trace1 = go.Bar(
        x = workLogAlpha['Assignee'],
        y = workLogAlpha['Allocation (%)'],
        marker = {'color': 'rgb(199, 153, 185)'},
        name = 'Estimate'
    )
    trace2 = go.Bar(
        x = workLogAlpha['Assignee'],
        y = workLogAlpha['Utilization (%)'],
        marker = {'color': 'darkviolet'},
        name = 'Time spent'
    )
    figure = {
        'data': [trace1, trace2],
        'layout': go.Layout(
            title = 'Overall Time Spent',
            xaxis = {'title': 'Assignee'},
            yaxis = {'title': 'Utilization'},
            hovermode='closest'
        )
    }
    #Teamwise segregated pie chart
    team = groups.Department.tolist()
    groups[['Allocation', 'Utilization']] = groups[['Allocation', 'Utilization']].mul(100).round(2)
    groups.rename(columns = {'Allocation': 'Allocation (%)', 'Utilization': 'Utilization (%)', 'Department': 'Team'}, inplace = True)
    utilize = groups['Utilization (%)'].tolist()
    trace3 = go.Pie(labels=team, values=utilize
    )
    pie_figure = {
        'data': [trace3],
        'layout': go.Layout(
            title = 'Team Segregation',
            hovermode='closest'
        )
    }
    #groups.rename(columns = {'Department': 'Team'}, inplace = True)
    groups.reset_index(inplace=True)
    #Preparing options (list of usernames in the drop down)
    for name in workLogAlpha.Assignee:
        nameDict = {}
        nameDict['label'] = name
        nameDict['value'] = name
        option.append(nameDict)
    print('{} tickets retrieved in {} seconds'.format(len(jiraData), round(executionTime, 2)))
    print('Refreshed on: ', datetime.today())
    #Returning everything prepared so far to the dashboard
    return [jiraData.to_dict('records'), html.Div(style={'display': 'None'}), figure, option, pie_figure, groups.to_dict('rows'), '{} tickets retrieved in {} seconds'.format(len(jiraData), round(executionTime, 2))]