def get_lists_containing_tea(tea_lists, tea):
    """Return a query over the given lists that contain *tea*.

    Restricts ``TeaList`` to the ids found in *tea_lists*, then keeps only
    the lists for which a ``TeaListItem`` row links that list to *tea*.
    Returns the (lazy) peewee select query.
    """
    candidate_ids = [candidate.id for candidate in tea_lists]

    # Correlated subquery: does an item row exist tying the current
    # TeaList row to the requested tea?
    tea_in_list = fn.Exists(
        TeaListItem.select(TeaListItem.id)
        .join(Tea, on=TeaListItem.tea_id == Tea.id)
        .where(
            (Tea.id == tea.id) &
            (TeaList.id == TeaListItem.tea_list)
        )
    )

    # "<<" is peewee's IN operator.
    return TeaList.select().where(
        (TeaList.id << candidate_ids) & tea_in_list
    )
def __init__(self, rh_account_id, cve_cache_from, cve_cache_keepalive,
             list_args, uri, args):
    """Build the CVE list query for one account and hand it to the base list view.

    :param rh_account_id: account whose CVEs are listed
    :param cve_cache_from: cache timestamp; truthy means an account-level
        cache exists and may be used
    :param cve_cache_keepalive: keepalive value written back so the cache
        for this account keeps being maintained
    :param list_args: pagination/sort/filter arguments (``list_args["filter"]``
        is a free-text filter)
    :param uri: request URI, forwarded to the base class
    :param args: parsed query arguments (``affecting``, system filters, ...)
    """
    # Default to INNER join; fall back to LEFT OUTER when the "affecting"
    # filter is absent or asks for non-affecting CVEs as well, so CVEs with
    # zero affected systems are still returned.
    join_type = JOIN.INNER
    if args['affecting'] is None or False in args[
            'affecting'] or True not in args['affecting']:
        join_type = JOIN.LEFT_OUTER

    # API using cache, set keepalive for account to enable maintaining cache
    update_cve_cache_keepalive(rh_account_id, cve_cache_keepalive)

    # Use cache if not disabled + systems are not filtered + cache exists
    if not DISABLE_ACCOUNT_CACHE and not is_cyndi_request(args) and cve_cache_from:
        count_subquery = self._cached_count_subquery(rh_account_id)
    else:
        # No cache: count affected systems live, joining inventory (cyndi)
        # data and applying system-level filters when the request uses them.
        count_subquery = self._count_subquery(rh_account_id)
        count_subquery = cyndi_join(count_subquery)
        if is_cyndi_request(args):
            count_subquery = apply_filters(count_subquery, args, [
                filter_types.SYSTEM_TAGS, filter_types.SYSTEM_SAP,
                filter_types.SYSTEM_SAP_SIDS, filter_types.SYSTEM_RHEL_VERSION
            ], {})

    # Main CVE query, then CVE-level filters (which may reference the
    # per-CVE affected-system count subquery).
    query = self._full_query(rh_account_id, join_type, count_subquery)
    query = apply_filters(query, args, [
        filter_types.CVE_BUSINESS_RISK, filter_types.CVE_CVSS,
        filter_types.CVE_IMPACT, filter_types.CVE_PUBLIC_DATE,
        filter_types.CVE_RULE_PRESENCE, filter_types.CVE_STATUS,
        filter_types.CVE_AFFECTING, filter_types.CVE_KNOWN_EXPLOITS
    ], {"count_subquery": count_subquery})
    query = query.dicts()

    # Columns the client may sort by; SQL(...) entries refer to aliases
    # produced by the query rather than model fields.
    sortable_columns = {
        "systems_affected": SQL('systems_affected'),
        "id": CveMetadata.id,
        "synopsis": CVE_SYNOPSIS_SORT,
        "public_date": CveMetadata.public_date,
        # This assumes we only show one score, and that cvss3 wins over cvss2
        "cvss_score": Case(None, ((CveMetadata.cvss3_score.is_null(True),
                                   CveMetadata.cvss2_score),),
                           CveMetadata.cvss3_score),
        "cvss3_score": CveMetadata.cvss3_score,
        "cvss2_score": CveMetadata.cvss2_score,
        "impact_id": CveMetadata.impact_id,
        "impact": CveMetadata.impact_id,
        "business_risk_id": SQL('business_risk_id'),
        "business_risk": SQL('business_risk_id'),
        "status_id": SQL('status_id'),
        "status": SQL('status_id'),
    }
    # Secondary sort column used as a tie-breaker for each primary sort.
    default_sort_columns = {
        'default': 'id',
        'cvss_score': 'public_date',
        'cvss2_score': 'public_date',
        'cvss3_score': 'public_date',
        'public_date': 'synopsis',
        'impact': 'public_date',
        'systems_affected': 'public_date'
    }
    # Columns matched by the free-text filter.
    filterable_columns = {
        "synopsis": CveMetadata.cve,
        "description": CveMetadata.description,
    }
    if list_args["filter"]:
        # Also match the free-text filter against descriptions of insights
        # rules mapped to the CVE (correlated EXISTS subquery).
        filter_expressions = {
            "rule_description":
                fn.Exists(CveRuleMapping.select(CveRuleMapping.cve_id)
                          .join(InsightsRule, on=(CveRuleMapping.rule_id == InsightsRule.id))
                          .where(CveRuleMapping.cve_id == CveMetadata.id)
                          .where(InsightsRule.description_text.contains(list_args["filter"])))
        }
    else:
        filter_expressions = {}

    super().__init__(query, sortable_columns, default_sort_columns,
                     filterable_columns, filter_expressions, list_args, args, uri)
def execute_if_it_has_to(self, company):
    """Sync pending task worklogs for *company* into Jira.

    Selects tasks that have never been pushed to Jira (no JiraTaskUpdated
    row) or have changed since the last push, extracts a ticket id from each
    task's description using ``self.ticket_regexps``, and creates or updates
    the matching Jira worklog. Records the sync time in JiraTaskUpdated.
    """
    # jira doesn't need to check if it _has_to_
    pending_updates = Task.select().join(Company).switch(Task).join(
        JiraTaskUpdated, JOIN_LEFT_OUTER).where((Company.id == company.id) & (
            (~(fn.Exists(JiraTaskUpdated.select().where(
                JiraTaskUpdated.task == Task.id)))) |
            (JiraTaskUpdated.updated_at < Task.updated_at)))
    company_tz = pytz.timezone(company.timezone)
    for task in list(pending_updates):
        # get the ticket id: earliest match in the description wins
        current_best_position = len(task.description)
        current_match = None
        for regexp in self.ticket_regexps:
            match = re.search(r'\b(' + regexp + r')\b', task.description)
            if match is not None and match.start(1) < current_best_position:
                current_best_position = match.start(1)
                current_match = match.group(1)
        if current_match is not None:
            issue = None
            try:
                issue = self.jira.issue(current_match)
            except Exception:
                # Best-effort lookup: the matched text may not be a real
                # ticket. Was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit — narrowed to Exception.
                pass
            if issue is not None:
                # found a ticket!
                description = task.description
                if current_best_position == 0:
                    # Ticket id leads the description: strip it plus any
                    # separator punctuation before the real text.
                    description = re.sub(
                        r'^[^a-zA-Z0-9\(]*', '',
                        task.description[len(current_match):])
                # Reuse an existing worklog for the same day/comment when
                # possible, only updating its time spent if it drifted.
                worklog_ready = False
                for worklog in self.jira.worklogs(issue.id):
                    started = parse(
                        worklog.started).astimezone(company_tz).date()
                    if task.date == started and worklog.comment == description:
                        if worklog.timeSpentSeconds != task.time_spent_seconds:
                            worklog.update(
                                timeSpentSeconds=task.time_spent_seconds)
                        worklog_ready = True
                if not worklog_ready:
                    # get the timezone suffix on the task's date (considering DST)
                    task_date_with_time = datetime.datetime.combine(
                        task.date, datetime.datetime.min.time())
                    suffix = company_tz.localize(
                        task_date_with_time).strftime('%z')
                    # make it 6pm wherever they are
                    dt = parse(
                        task.date.strftime('%Y-%m-%dT18:00:00') + suffix)
                    self.jira.add_worklog(
                        issue.id,
                        timeSpentSeconds=task.time_spent_seconds,
                        started=dt,
                        comment=description)
                # Upsert the sync marker so this task is skipped until it
                # changes again.
                if task.jira_tasks_updated.count() == 0:
                    task_updated = JiraTaskUpdated()
                else:
                    task_updated = task.jira_tasks_updated[0]
                task_updated.task = task
                task_updated.updated_at = datetime.datetime.utcnow()
                task_updated.save()