def _get_sorted_matching_repositories(lookup_value, repo_kind="image", include_private=False, search_fields=None, ids_only=False):
    """
    Returns a query of repositories matching the given lookup string, with optional
    inclusion of private repositories. Note that this method does *not* filter
    results based on visibility to users.

    :param lookup_value: free-text search term; when falsy, a generic
        score-sorted listing is returned instead of a search.
    :param repo_kind: repository kind name to filter on, or None for all kinds.
    :param include_private: when False, only publicly visible repositories match.
    :param search_fields: iterable of SEARCH_FIELDS names to match against;
        defaults to name + description.
    :param ids_only: when True, select only Repository.id (no Namespace join).
    :return: an unexecuted peewee query.
    """
    # Trim the select list to just the ID when the caller only needs identifiers.
    select_fields = [Repository.id] if ids_only else [Repository, Namespace]
    if not lookup_value:
        # This is a generic listing of repositories. Simply return the sorted repositories based
        # on RepositorySearchScore.
        query = (Repository.select(*select_fields)
                 .join(RepositorySearchScore)
                 .where(Repository.state != RepositoryState.MARKED_FOR_DELETION)
                 .order_by(RepositorySearchScore.score.desc(), RepositorySearchScore.id))
    else:
        if search_fields is None:
            search_fields = set([SEARCH_FIELDS.description.name, SEARCH_FIELDS.name.name])

        # Always search at least on name (init clause)
        clause = Repository.name.match(lookup_value)
        computed_score = RepositorySearchScore.score.alias("score")

        # If the description field is in the search fields, then we need to compute a synthetic score
        # to discount the weight of the description more than the name.
        if SEARCH_FIELDS.description.name in search_fields:
            clause = Repository.description.match(lookup_value) | clause
            cases = [
                # A name match boosts the stored score by 100x so name hits rank
                # above description-only hits.
                (Repository.name.match(lookup_value), 100 * RepositorySearchScore.score),
            ]
            computed_score = Case(None, cases, RepositorySearchScore.score).alias("score")

        select_fields.append(computed_score)
        query = (Repository.select(*select_fields)
                 .join(RepositorySearchScore)
                 .where(clause)
                 .where(Repository.state != RepositoryState.MARKED_FOR_DELETION)
                 # Order by the aliased synthetic score computed above.
                 .order_by(SQL("score").desc(), RepositorySearchScore.id))

    if repo_kind is not None:
        # Note: the repo kind is optional and should be passed in as None if
        # all kinds should be returned.
        query = query.where(Repository.kind == Repository.kind.get_id(repo_kind))

    if not include_private:
        query = query.where(Repository.visibility == _basequery.get_public_repo_visibility())

    if not ids_only:
        # The Namespace join is only needed when full rows are selected.
        query = query.switch(Repository).join(
            Namespace, on=(Namespace.id == Repository.namespace_user))

    return query
async def move_shape_order(sid: str, data: ShapeOrder):
    """Handle a client request to move a shape to a new z-index on its layer."""
    pr: PlayerRoom = game_state.get(sid)
    # Temporary (client-only) shapes are not persisted; skip all DB work for them
    # and only relay the event.
    if not data["temporary"]:
        shape = Shape.get(uuid=data["uuid"])
        layer = shape.layer
        if pr.role != Role.DM and not layer.player_editable:
            logger.warning(
                f"{pr.player.name} attempted to move a shape order on a dm layer"
            )
            return
        target = data["index"]
        # Direction the in-between shapes must shift to fill the vacated slot.
        sign = 1 if target < shape.index else -1
        case = Case(
            None,
            (
                # The moved shape itself lands on the target index.
                (Shape.index == shape.index, target),
                # Shapes on the old-index side shift by one toward the vacated slot.
                # NOTE(review): this condition has no bound at `target`, so shapes
                # beyond the target position also shift — confirm intended.
                (
                    (sign * Shape.index) < (sign * shape.index),
                    (Shape.index + (sign * 1)),
                ),
            ),
            Shape.index,
        )
        Shape.update(index=case).where(Shape.layer == layer).execute()
    await sio.emit(
        "Shape.Order.Set",
        data,
        room=pr.active_location.get_path(),
        skip_sid=sid,
        namespace=GAME_NS,
    )
def _full_query(rh_account_id):
    """Build the full system-listing query for one RH account.

    Selects evaluated, non-deleted systems with their inventory metadata as
    dicts; ``cve_count`` is NULL for opted-out systems.
    """
    # pylint: disable=singleton-comparison
    selectables = [
        SystemPlatform.inventory_id,
        SystemPlatform.display_name,
        SystemPlatform.last_evaluation,
        SystemPlatform.advisor_evaluated.alias('rules_evaluation'),
        SystemPlatform.opt_out,
        SystemPlatform.last_upload,
        SystemPlatform.stale_timestamp,
        SystemPlatform.stale_warning_timestamp,
        SystemPlatform.culled_timestamp,
        # Hide the cached CVE count for opted-out systems (NULL instead of a number).
        Case(None, (((SystemPlatform.opt_out == False), SystemPlatform.cve_count_cache), ), None).alias("cve_count"),
        InventoryHosts.tags,
        InventoryHosts.updated,
        InventoryHosts.insights_id,
        OS_INFO_QUERY.alias('os'),
    ]
    return (SystemPlatform.select(*selectables)
            .where(SystemPlatform.rh_account_id == rh_account_id)
            # Only systems evaluated by at least one engine (vuln or advisor).
            .where(SystemPlatform.last_evaluation.is_null(False) |
                   SystemPlatform.advisor_evaluated.is_null(False))
            .where(SystemPlatform.when_deleted.is_null(True))
            .dicts())
def _count_subquery(rh_account_id):
    """Per-CVE aggregation subquery for one RH account.

    For every CVE, counts affected systems (``systems_affected_``) and how many
    of those systems have a status diverging from the account-level CVE status
    (``systems_status_divergent_``). Mitigated vulnerabilities are excluded
    unless they come from an active (non-rule-only) Insights rule.
    """
    # pylint: disable=singleton-comparison
    return (SystemVulnerabilities.select(
        SystemVulnerabilities.cve_id.alias("cve_id_"),
        fn.Count(SystemVulnerabilities.id).alias("systems_affected_"),
        # 1 for each system whose status differs from the account-level status.
        fn.Sum(Case(None, ((SystemVulnerabilities.status_id != CveAccountData.status_id, 1), ), 0)).alias("systems_status_divergent_"))
        # Restrict to live, opted-in, non-stale systems of this account.
        .join(SystemPlatform, on=((SystemVulnerabilities.system_id == SystemPlatform.id) &
                                  (SystemPlatform.rh_account_id == rh_account_id) &
                                  (SystemPlatform.opt_out == False) &  # noqa: E712
                                  (SystemPlatform.stale == False) &  # noqa: E712
                                  (SystemPlatform.when_deleted.is_null(True))))
        # Account-level CVE data is optional, hence the LEFT OUTER join.
        .join(CveAccountData, JOIN.LEFT_OUTER,
              on=((SystemVulnerabilities.cve_id == CveAccountData.cve_id) &
                  (CveAccountData.rh_account_id == rh_account_id)))
        .where(SystemVulnerabilities.rh_account_id == rh_account_id)
        # Keep rows without a mitigation reason, or whose rule is inactive (and
        # not rule-only).
        .where((SystemVulnerabilities.mitigation_reason.is_null(True)) |
               (SystemVulnerabilities.rule_id << InsightsRule.select(InsightsRule.id).where(
                   (InsightsRule.active == False) & (~InsightsRule.rule_only))))
        # Keep unmitigated rows, or ones whose rule is active (and not rule-only).
        .where((SystemVulnerabilities.when_mitigated.is_null(True)) |
               (SystemVulnerabilities.rule_id << InsightsRule.select(InsightsRule.id).where(
                   (InsightsRule.active == True) & (~InsightsRule.rule_only))))
        .group_by(SystemVulnerabilities.cve_id))
def __init__(self, rh_account_id, list_args, query_args, filter_args, parsed_args, uri,
             ids_only=False, cve_advisories=None):
    """Set up the paginated/sortable/filterable system-CVE view.

    :param rh_account_id: account the query is scoped to.
    :param ids_only: when True, build the lighter ID-only base query.
    :param cve_advisories: extra context passed through to the advisory filter.
    """
    # Choose between the full row query and the minimal ID query.
    query = self._full_query(rh_account_id, query_args) if not ids_only else self._id_query(rh_account_id, query_args)
    query = cyndi_join(query)
    # Filter types this endpoint supports; applied from filter_args below.
    filters = [filter_types.CVE_BUSINESS_RISK, filter_types.CVE_CVSS, filter_types.CVE_IMPACT,
               filter_types.CVE_PUBLIC_DATE, filter_types.SYSTEM_CVE_STATUS,
               filter_types.SYSTEM_CVE_RULE_PRESENCE, filter_types.SYSTEM_CVE_RULE,
               filter_types.CVE_KNOWN_EXPLOITS, filter_types.CVE_BY_ADVISORY,
               filter_types.SYSTEM_CVE_FIRST_REPORTED, filter_types.SYSTEM_CVE_ADVISORY_AVAILABLE,
               filter_types.SYSTEM_CVE_REMEDIATION,
               ]
    query = apply_filters(query, filter_args, filters, {'cve_advisories': cve_advisories})
    query = query.dicts()
    # Mapping of API sort keys to peewee expressions; SQL(...) entries refer to
    # aliases computed in the base query.
    sortable_columns = {
        'id': CveMetadata.id,
        'cve': CVE_SYNOPSIS_SORT,
        'synopsis': CVE_SYNOPSIS_SORT,
        'public_date': CveMetadata.public_date,
        # This assumes we only show one score, and that cvss3 wins over cvss2
        'cvss_score': Case(None, ((CveMetadata.cvss3_score.is_null(True), CveMetadata.cvss2_score),),
                           CveMetadata.cvss3_score),
        "cvss3_score": CveMetadata.cvss3_score,
        "cvss2_score": CveMetadata.cvss2_score,
        'impact_id': CveMetadata.impact_id,
        'impact': CveMetadata.impact_id,
        'status_id': Status.id,
        'status': Status.id,
        'business_risk_id': SQL('business_risk_id'),
        'business_risk': SQL('business_risk_id'),
        'first_reported': SystemVulnerabilities.first_reported,
        'advisory_available': SQL('advisory_available'),
        'remediation': SQL('remediation_type_id'),
    }
    # Secondary sort column used when the primary key ties.
    default_sort_columns = {
        'default': 'id',
        'cvss_score': 'public_date',
        'cvss2_score': 'public_date',
        'cvss3_score': 'public_date',
        'public_date': 'synopsis',
        'impact': 'public_date',
        'business_risk': 'public_date',
        'status': 'public_date',
    }
    filterable_columns = {
        'cve': CveMetadata.cve,
        'description': CveMetadata.description,
        'status': Status.name,
        "rule_description": InsightsRule.description_text
    }
    filter_expressions = {}
    super().__init__(query, sortable_columns, default_sort_columns, filterable_columns,
                     filter_expressions, list_args, parsed_args, uri)
def __init__(self, list_args, cve_list, uri):
    """Build a view over an explicit list of CVE names.

    Incoming names are sanitized (NUL characters removed) before being used in
    the ``IN`` clause. Each row carries the CVE name, a single CVSS score
    (cvss3 preferred, cvss2 as fallback) and the impact name.
    """
    # Strip NUL bytes from the caller-supplied CVE names.
    cve_list = [remove_str_nulls(elem) for elem in cve_list]
    cvss_score = fn.COALESCE(CveMetadata.cvss3_score, CveMetadata.cvss2_score).alias('cvss_score')
    base_query = CveMetadata.select(
        CveMetadata.cve,
        cvss_score,
        CveImpact.name.alias('impact'),
    )
    base_query = base_query.join(CveImpact, on=(CveMetadata.impact_id == CveImpact.id))
    query = base_query.where(CveMetadata.cve.in_(cve_list)).dicts()
    sortable_columns = {
        'synopsis': CVE_SYNOPSIS_SORT,
        # This assumes we only show one score, and that cvss3 wins over cvss2
        'cvss_score': Case(None, ((CveMetadata.cvss3_score.is_null(True), CveMetadata.cvss2_score),),
                           CveMetadata.cvss3_score),
        'impact_id': CveMetadata.impact_id,
        'impact': CveMetadata.impact_id,
    }
    filterable_columns = {
        'synopsis': CveMetadata.cve,
    }
    default_sort_columns = {
        'default': 'synopsis'
    }
    super().__init__(query, sortable_columns, default_sort_columns, filterable_columns,
                     {}, list_args, [], uri)
def __init__(self, list_args, query_args, filter_args, parsed_args, uri, ids_only=False):
    """Set up the system-CVE view (cyndi variant).

    :param ids_only: when True, build the lighter ID-only base query.
    """
    # Choose between the full row query and the minimal ID query.
    query = self._full_query(query_args) if not ids_only else self._id_query(query_args)
    query = cyndi_query(query)
    query = apply_filters(query, filter_args, [filter_types.CVE_BUSINESS_RISK, filter_types.CVE_CVSS,
                                               filter_types.CVE_IMPACT, filter_types.CVE_PUBLIC_DATE,
                                               filter_types.SYSTEM_CVE_STATUS,
                                               filter_types.SYSTEM_CVE_RULE_PRESENCE,
                                               filter_types.SYSTEM_CVE_RULE_OLD])
    query = query.dicts()
    # API sort keys mapped to peewee expressions; SQL(...) entries refer to
    # aliases produced by the base query.
    sortable_columns = {
        'id': CveMetadata.id,
        'cve': CVE_SYNOPSIS_SORT,
        'synopsis': CVE_SYNOPSIS_SORT,
        'public_date': CveMetadata.public_date,
        # This assumes we only show one score, and that cvss3 wins over cvss2
        'cvss_score': Case(None, ((CveMetadata.cvss3_score.is_null(True), CveMetadata.cvss2_score),),
                           CveMetadata.cvss3_score),
        "cvss3_score": CveMetadata.cvss3_score,
        "cvss2_score": CveMetadata.cvss2_score,
        'impact_id': CveMetadata.impact_id,
        'impact': CveMetadata.impact_id,
        'status_id': Status.id,
        'status': Status.id,
        'business_risk_id': SQL('business_risk_id'),
        'business_risk': SQL('business_risk_id'),
    }
    default_sort_columns = ['id']
    filterable_columns = {
        'cve': CveMetadata.cve,
        'description': CveMetadata.description,
        'status': Status.name
    }
    super(SystemCvesView, self).__init__(query, sortable_columns, default_sort_columns,
                                         filterable_columns, list_args, parsed_args, uri)
def __init__(self, list_args, query_args, uri, args, ids_only=False):
    """Set up the CVE-list view backed by per-account CVE data.

    With ``args['show_all']``, CVEs with zero affected systems are included via
    a RIGHT OUTER join and the count is coalesced to 0.
    """
    join_type = JOIN.INNER
    cve_count = CveAccountData.systems_affected
    if 'show_all' in args and args['show_all']:
        # Include CVEs that affect no systems in this account.
        join_type = JOIN.RIGHT_OUTER
        cve_count = fn.COALESCE(cve_count, 0)
    # Choose between the full row query and the minimal ID query.
    query = self._full_query(cve_count, join_type, query_args) if not ids_only else self._id_query(join_type, query_args)
    query = apply_filters(query, args, [filter_types.CVE_BUSINESS_RISK, filter_types.CVE_CVSS,
                                        filter_types.CVE_IMPACT, filter_types.CVE_PUBLIC_DATE,
                                        filter_types.CVE_RULE_PRESENCE, filter_types.CVE_SHOW_ALL,
                                        filter_types.CVE_STATUS, filter_types.CVE_RULE_PRESENCE_OLD])
    query = query.dicts()
    # API sort keys mapped to peewee expressions; SQL(...) entries refer to
    # aliases produced by the base query.
    sortable_columns = {
        "systems_affected": SQL('systems_affected'),
        "id": CveMetadata.id,
        "synopsis": CVE_SYNOPSIS_SORT,
        "public_date": CveMetadata.public_date,
        # This assumes we only show one score, and that cvss3 wins over cvss2
        "cvss_score": Case(None, ((CveMetadata.cvss3_score.is_null(True), CveMetadata.cvss2_score),),
                           CveMetadata.cvss3_score),
        "cvss3_score": CveMetadata.cvss3_score,
        "cvss2_score": CveMetadata.cvss2_score,
        "impact_id": CveMetadata.impact_id,
        "impact": CveMetadata.impact_id,
        "business_risk_id": SQL('business_risk_id'),
        "business_risk": SQL('business_risk_id'),
        "status_id": SQL('status_id'),
        "status": SQL('status_id'),
    }
    default_sort_columns = ['id']
    filterable_columns = {
        "synopsis": CveMetadata.cve,
        "description": CveMetadata.description
    }
    super(CvesListView, self).__init__(query, sortable_columns, default_sort_columns,
                                       filterable_columns, list_args, args, uri)
def __init__(self, list_args, query_args, filter_args, parsed_args, uri):
    """Set up the per-system CVE view for one inventory host.

    Lists unmitigated CVEs of the system identified by
    ``query_args['inventory_id']`` within ``query_args['rh_account_number']``,
    with optional CVSS/date/status/severity filters from ``filter_args``.
    """
    query = (
        SystemVulnerabilities.select(
            CveMetadata.cve.alias('cve_name'),
            CveMetadata.cvss3_score,
            CveMetadata.cvss2_score,
            CveImpact.name.alias('impact'),
            CveMetadata.public_date,
            CveMetadata.description.alias('cve_description'),
            Status.id.alias('status_id'),
            Status.name.alias('status_name'))
        .join(SystemPlatform, on=(SystemVulnerabilities.inventory_id == SystemPlatform.inventory_id))
        .join(CveMetadata, on=(SystemVulnerabilities.cve == CveMetadata.cve))
        .join(CveImpact, on=(CveMetadata.impact_id == CveImpact.id))
        .join(Status, on=(SystemVulnerabilities.status_id == Status.id))
        # Only currently unmitigated vulnerabilities.
        .where(SystemVulnerabilities.when_mitigated.is_null(True))
        .where(SystemPlatform.rh_account == query_args['rh_account_number'])
        .where(SystemPlatform.inventory_id == query_args['inventory_id']))
    # Optional filters; each is applied only when present and truthy.
    # NOTE(review): cvss_from/cvss_to compare against cvss3_score only — rows
    # with a NULL cvss3 (cvss2-only CVEs) are dropped by these filters; confirm
    # intended.
    if 'cvss_from' in filter_args and filter_args['cvss_from']:
        query = query.where(CveMetadata.cvss3_score >= filter_args['cvss_from'])
    if 'cvss_to' in filter_args and filter_args['cvss_to']:
        query = query.where(CveMetadata.cvss3_score <= filter_args['cvss_to'])
    if 'public_from' in filter_args and filter_args['public_from']:
        query = query.where(CveMetadata.public_date >= filter_args['public_from'])
    if 'public_to' in filter_args and filter_args['public_to']:
        query = query.where(CveMetadata.public_date <= filter_args['public_to'])
    if 'status_id' in filter_args and filter_args['status_id']:
        query = query.where(Status.id << filter_args['status_id'])
    if 'severity' in filter_args and filter_args['severity']:
        query = query.where(CveMetadata.impact_id << filter_args['severity'])
    query = query.dicts()
    # API sort keys mapped to peewee expressions.
    sortable_columns = {
        'cve': SystemVulnerabilities.cve,
        'synopsis': SystemVulnerabilities.cve,
        'public_date': CveMetadata.public_date,
        # This assumes we only show one score, and that cvss3 wins over cvss2
        'cvss_score': Case(None, ((CveMetadata.cvss3_score.is_null(True), CveMetadata.cvss2_score),),
                           CveMetadata.cvss3_score),
        "cvss3_score": CveMetadata.cvss3_score,
        "cvss2_score": CveMetadata.cvss2_score,
        'impact': CveMetadata.impact_id,
        'status': Status.id
    }
    filterable_columns = {
        'cve': SystemVulnerabilities.cve,
        'description': CveMetadata.description,
        'status': Status.name
    }
    super(SystemCvesView, self).__init__(query, sortable_columns, filterable_columns,
                                         list_args, parsed_args, uri)
async def get_clan_members(self, clan_ids, sorted_by=None):
    """Return members of the given clans, with a platform-appropriate username.

    :param clan_ids: iterable of clan IDs to include.
    :param sorted_by: optional 'join_date' or 'username' ordering; any other
        value leaves the result unordered.
    """
    # Pick the username column matching each member's platform.
    # NOTE(review): PLATFORM_BLIZZARD maps to Member.psn_username — looks like a
    # copy/paste of the PSN branch; confirm whether a dedicated Blizzard
    # username field exists on Member.
    username = Case(ClanMember.platform_id, (
        (constants.PLATFORM_XBOX, Member.xbox_username),
        (constants.PLATFORM_PSN, Member.psn_username),
        (constants.PLATFORM_BLIZZARD, Member.psn_username),
        (constants.PLATFORM_STEAM, Member.steam_username),
        (constants.PLATFORM_STADIA, Member.stadia_username)))
    query = Member.select(
        Member, ClanMember, Clan, username.alias('username')).join(
        ClanMember).join(Clan).where(Clan.clan_id << clan_ids)
    if sorted_by == 'join_date':
        query = query.order_by(ClanMember.join_date)
    elif sorted_by == 'username':
        query = query.order_by(username)
    return await self.execute(query)
def order_by_uuid_with_random_start(select_query: Select, uuid_field: Field) -> Select:
    """Order *select_query* by *uuid_field*, starting from a random pivot UUID.

    Rows whose UUID is greater than a freshly generated random UUID sort first
    (bucket 0), then the remaining rows (bucket 1); within each bucket rows are
    ordered by the UUID itself, so every entity is still included — the order
    simply "wraps around" from the pivot back to the minimum UUID.

    :param select_query: The query to add the "order by" to.
    :param uuid_field: The UUID field to order on.
    :return: The query updated to include the "order by".
    """
    pivot = uuid4()
    bucket = Case(None, [(uuid_field > pivot, 0)], 1)
    return select_query.order_by(bucket, uuid_field)
def left_in_exercise(cls, exercise: Exercise) -> int:
    """Return the percentage (0-100) of active solutions for *exercise* that are DONE.

    Returns 0 when the exercise has no active solutions (previously this raised
    ZeroDivisionError, and ``SUM`` over zero rows yielded ``None`` causing a
    TypeError before the division was even reached).

    :param exercise: the exercise whose solutions are aggregated.
    :raises peewee.DoesNotExist: propagated from ``.get()`` if the aggregate
        query yields no row at all.
    """
    # 1 for DONE solutions, 0 otherwise — summed to count checked solutions.
    one_if_is_checked = Case(Solution.state, ((Solution.STATES.DONE.name, 1), ), 0)
    active_solutions = cls.state.in_(Solution.STATES.active_solutions())
    response = cls.filter(
        cls.exercise == exercise,
        active_solutions,
    ).select(
        fn.Count(cls.id).alias('submitted'),
        fn.Sum(one_if_is_checked).alias('checked'),
    ).dicts().get()
    submitted = response['submitted'] or 0
    # SUM over zero matching rows is SQL NULL -> None; treat as 0.
    checked = response['checked'] or 0
    if submitted == 0:
        # No active solutions yet: nothing checked, avoid division by zero.
        return 0
    return int(checked * 100 / submitted)
def __init__(self, list_args, query_args, uri, args={}):  # pylint: disable=dangerous-default-value
    """Set up the CVE-list view backed by the affected-systems cache.

    With ``args['show_all']``, CVEs affecting zero systems are included via a
    RIGHT OUTER join and the count is coalesced to 0. With
    ``query_args['hide_satellite_managed']``, the direct (non-Satellite) count
    is reported instead of the total.
    """
    join_type = JOIN.INNER
    cve_count = CveAffectedSystemsCache.systems_affected
    if query_args['hide_satellite_managed']:
        # Report only directly-managed systems when Satellite hosts are hidden.
        cve_count = CveAffectedSystemsCache.direct_systems_affected
    if 'show_all' in args and args['show_all']:
        # Include CVEs with zero affected systems.
        join_type = JOIN.RIGHT_OUTER
        cve_count = fn.COALESCE(cve_count, 0)
    query = (
        CveAffectedSystemsCache
        .select(cve_count.alias("systems_affected"),
                CveMetadata.cve.alias("cve_name"),
                CveMetadata.cvss3_score,
                CveMetadata.cvss2_score,
                CveMetadata.impact_id,
                CveMetadata.public_date,
                CveMetadata.description.alias("cve_description"))
        .join(CveMetadata, join_type,
              on=((CveAffectedSystemsCache.cve == CveMetadata.cve) &
                  (CveAffectedSystemsCache.rh_account == query_args["rh_account_number"])))
    )
    if query_args['hide_satellite_managed'] and not ('show_all' in args and args['show_all']):
        # Without show_all, drop CVEs that only affect Satellite-managed systems.
        query = query.where(CveAffectedSystemsCache.direct_systems_affected > 0)
    # Optional filters; each is applied only when present and truthy.
    # NOTE(review): cvss_from/cvss_to compare against cvss3_score only — rows
    # with a NULL cvss3 are dropped by these filters; confirm intended.
    if 'cvss_from' in args and args['cvss_from']:
        query = query.where(CveMetadata.cvss3_score >= args['cvss_from'])
    if 'cvss_to' in args and args['cvss_to']:
        query = query.where(CveMetadata.cvss3_score <= args['cvss_to'])
    if 'public_from' in args and args['public_from']:
        query = query.where(CveMetadata.public_date >= args['public_from'])
    if 'public_to' in args and args['public_to']:
        query = query.where(CveMetadata.public_date <= args['public_to'])
    if 'severity' in args and args['severity']:
        query = query.where(CveMetadata.impact_id << args['severity'])
    query = query.dicts()
    # API sort keys mapped to peewee expressions.
    sortable_columns = {
        "systems_affected": SQL('systems_affected'),
        "synopsis": CveMetadata.cve,
        "public_date": CveMetadata.public_date,
        # This assumes we only show one score, and that cvss3 wins over cvss2
        "cvss_score": Case(None, ((CveMetadata.cvss3_score.is_null(True), CveMetadata.cvss2_score),),
                           CveMetadata.cvss3_score),
        "cvss3_score": CveMetadata.cvss3_score,
        "cvss2_score": CveMetadata.cvss2_score,
        "impact": CveMetadata.impact_id
    }
    filterable_columns = {
        "synopsis": CveMetadata.cve,
        "description": CveMetadata.description
    }
    super(CvesListView, self).__init__(query, sortable_columns, filterable_columns,
                                       list_args, args, uri)
def status():
    """Render the status page with per-exercise submitted/checked solution counts."""
    # 1 for checked solutions, 0 otherwise — summed per exercise below.
    checked_flag = Case(Solution.is_checked, ((True, 1), ), 0)
    columns = (
        Exercise.id,
        Exercise.subject.alias('name'),
        fn.Count(Solution.id).alias('submitted'),
        fn.Sum(checked_flag).alias('checked'),
    )
    exercise_stats = (
        Exercise
        .select(*columns)
        # LEFT OUTER so exercises without any solutions still appear.
        .join(Solution, 'LEFT OUTER', on=(Solution.exercise == Exercise.id))
        .where(Exercise.is_archived == False)  # NOQA: E712
        .group_by(Exercise.subject, Exercise.id)
        .order_by(Exercise.id)
    )
    return render_template(
        'status.html',
        exercises=exercise_stats,
    )
async def move_shape_order(sid, data):
    """Handle a client request to move a shape's z-order (legacy protocol)."""
    sid_data = state.sid_map[sid]
    user = sid_data["user"]
    room = sid_data["room"]
    location = sid_data["location"]
    shape = Shape.get(uuid=data["shape"]["uuid"])
    layer = shape.layer
    if room.creator != user and not layer.player_editable:
        logger.warning(
            f"{user.name} attempted to move a shape order on a dm layer")
        return
    # Client indices are 0-based; stored indices appear 1-based here.
    target = data["index"] + 1
    # Direction of the shift applied to shapes between old and new position.
    sign = 1 if target <= 1 else -1
    # polarity compensates for stored indices being negated (see the *-1 below);
    # NOTE(review): the sign/polarity interplay is not derivable from this
    # function alone — confirm against the index storage convention.
    polarity = 1 if shape.index > 0 else -1
    case = Case(
        None,
        (
            # The moved shape lands on the (re-negated) target index.
            (Shape.index == shape.index, target * (-polarity)),
            # Shapes on the old-index side shift by one, with the negation applied.
            (
                (polarity * sign * Shape.index) < (polarity * sign * shape.index),
                (Shape.index + (polarity * sign * 1)) * -1,
            ),
        ),
        Shape.index * -1,
    )
    Shape.update(index=case).where(Shape.layer == layer).execute()
    if layer.player_visible:
        # Broadcast to everyone in the location except the originator.
        await sio.emit(
            "Shape.Order.Set",
            data,
            room=location.get_path(),
            skip_sid=sid,
            namespace="/planarally",
        )
    else:
        # Hidden layer: only the DM's other sessions get the update.
        for csid in state.get_sids(user=room.creator, room=room):
            if csid == sid:
                continue
            await sio.emit("Shape.Order.Set",
                           data["shape"],
                           room=csid,
                           namespace="/planarally")
def _id_query(rh_account_id, list_args):
    """Minimal system-listing query for one RH account (IDs and opt-out flag only).

    The cached CVE count is selected only when the caller sorts by it; opted-out
    systems get NULL instead of the cached value.
    """
    # pylint: disable=singleton-comparison
    columns = [SystemPlatform.inventory_id, SystemPlatform.opt_out]
    sort_spec = list_args["sort"]
    if sort_spec and "cve_count" in sort_spec:
        masked_count = Case(
            None,
            (((SystemPlatform.opt_out == False), SystemPlatform.cve_count_cache), ),
            None)
        columns.append(masked_count.alias("cve_count"))
    return (SystemPlatform
            .select(*columns)
            .where(SystemPlatform.rh_account_id == rh_account_id)
            # Only systems evaluated by at least one engine.
            .where(SystemPlatform.last_evaluation.is_null(False) |
                   SystemPlatform.advisor_evaluated.is_null(False))
            .where(SystemPlatform.when_deleted.is_null(True))
            .dicts())
def _execute(self) -> Tuple[List[TopBalanceOutput]]: """ Execute stock level transaction :return: number of items with lower stock quantity than threshold """ # Get customers with warehouse and district information top_customer_query = (Customer.select( Case( None, (( Customer.middle_name.is_null(), fn.CONCAT( Customer.first_name, " ", Customer.last_name, ), ), ), fn.CONCAT( Customer.first_name, Customer.middle_name, Customer.last_name, ), ).alias("customer_name"), Customer.balance.alias("customer_balance"), Customer.warehouse_id.alias("warehouse_id"), Customer.district_id.alias("district_id"), ).order_by(Customer.balance.desc()).limit( TopBalanceTransaction.LIMIT).cte("top_customer_query")) top_balance_query = (top_customer_query.select_from( top_customer_query.c.customer_name, top_customer_query.c.customer_balance, Warehouse.name.alias("warehouse_name"), District.name.alias("district_name"), ).join( District, on=((top_customer_query.c.warehouse_id == District.warehouse_id) & (top_customer_query.c.district_id == District.id)), ).join( Warehouse, on=(top_customer_query.c.warehouse_id == Warehouse.id), ).order_by(top_customer_query.c.customer_balance.desc()).with_cte( top_customer_query)) return ([result for result in top_balance_query.dicts()], )
def status(cls):
    """Per-exercise totals over active solutions: submitted count and checked (DONE) count."""
    # 1 for DONE solutions, 0 otherwise — summed per exercise below.
    checked_case = Case(Solution.state, ((Solution.STATES.DONE.name, 1), ), 0)
    columns = (
        Exercise.id,
        Exercise.subject.alias('name'),
        Exercise.is_archived.alias('is_archived'),
        fn.Count(Solution.id).alias('submitted'),
        fn.Sum(checked_case).alias('checked'),
    )
    query = cls  # placeholder removed; build directly below
    query = Exercise.select(*columns)
    query = query.join(Solution, 'LEFT OUTER', on=(Solution.exercise == Exercise.id))
    query = query.where(Solution.state.in_(Solution.STATES.active_solutions()))
    return query.group_by(Exercise.subject, Exercise.id).order_by(Exercise.id)
def order_by_dataset_split_with_random_start(select_query: Select, dataset_split_field: Field,
                                             available_dataset_splits: Union[List[int], None]) -> Select:
    """Order *select_query* by *dataset_split_field*, starting at a random split.

    A random split is chosen from *available_dataset_splits* (defaults to 0-9);
    rows at or above it sort first (bucket 0), the rest follow (bucket 1), and
    each bucket is ordered by the split field itself — so the order wraps
    around and every entity is included.

    :param select_query: The query to add the "order by" to.
    :param dataset_split_field: The dataset_split field to order on.
    :param available_dataset_splits: The available dataset_splits to start on.
    :return: The query updated to include the "order by".
    """
    splits = available_dataset_splits if available_dataset_splits is not None else list(range(10))
    pivot = random.choice(splits)
    wrap_bucket = Case(None, [(dataset_split_field >= pivot, 0)], 1)
    return select_query.order_by(wrap_bucket, dataset_split_field)
async def move_shape_order(sid: int, data: Dict[str, Any]):
    """Handle a client request to move a shape to a new z-index on its layer."""
    pr: PlayerRoom = game_state.get(sid)
    shape = Shape.get(uuid=data["shape"]["uuid"])
    layer = shape.layer
    if pr.role != Role.DM and not layer.player_editable:
        logger.warning(
            f"{pr.player.name} attempted to move a shape order on a dm layer")
        return
    target = data["index"]
    # Direction the other shapes must shift to fill the vacated slot.
    sign = 1 if target < shape.index else -1
    case = Case(
        None,
        (
            # The moved shape itself lands on the target index.
            (Shape.index == shape.index, target),
            # Shapes on the old-index side shift by one.
            # NOTE(review): no bound at `target`, so shapes beyond the target
            # also shift — confirm intended.
            ((sign * Shape.index) < (sign * shape.index),
             (Shape.index + (sign * 1))),
        ),
        Shape.index,
    )
    Shape.update(index=case).where(Shape.layer == layer).execute()
    if layer.player_visible:
        # Broadcast to everyone in the location except the originator.
        await sio.emit(
            "Shape.Order.Set",
            data,
            room=pr.active_location.get_path(),
            skip_sid=sid,
            namespace=GAME_NS,
        )
    else:
        # Hidden layer: only the DM's other sessions get the update.
        for csid in game_state.get_sids(player=pr.room.creator, room=pr.room):
            if csid == sid:
                continue
            await sio.emit("Shape.Order.Set",
                           data["shape"],
                           room=csid,
                           namespace=GAME_NS)
def status(cls, course_id: Optional[int] = None):
    """Per-exercise totals over active solutions, optionally scoped to one course.

    :param course_id: when given, restricts the aggregation to that course;
        when None, all courses are included.
    :return: an unexecuted query of (id, course, name, is_archived, submitted,
        checked) rows.
    """
    # 1 for DONE solutions, 0 otherwise — summed per exercise below.
    one_if_is_checked = Case(
        Solution.state, ((Solution.STATES.DONE.name, 1), ), 0,
    )
    fields = (
        Exercise.id,
        Exercise.course,
        Exercise.subject.alias('name'),
        Exercise.is_archived.alias('is_archived'),
        fn.Count(Solution.id).alias('submitted'),
        fn.Sum(one_if_is_checked).alias('checked'),
    )
    active_solution_states = Solution.STATES.active_solutions()
    active_solutions = Solution.state.in_(active_solution_states)
    # When course_id is None this short-circuits to the Python value True;
    # otherwise it is a peewee expression evaluated in SQL.
    right_course = (course_id is None) or course_id == Course.id
    # .switch() with no argument returns the join context to the query's model
    # (Exercise) before joining Solution.
    return (Exercise.select(*fields)
            .join(Course, on=(Course.id == Exercise.course))
            .switch()
            .join(Solution, on=(Solution.exercise == Exercise.id))
            .where(active_solutions & right_course)
            .group_by(Exercise.subject, Exercise.id)
            .order_by(Exercise.id))
def __init__(self, list_args, query_args, uri, args={}):  # pylint: disable=dangerous-default-value
    """Set up the CVE-list view backed by per-account CVE data.

    With ``args['show_all']``, CVEs affecting zero systems are included via a
    RIGHT OUTER join and the count is coalesced to 0; account-level business
    risk and status fall back to defaults when unset.
    """
    join_type = JOIN.INNER
    cve_count = CveAccountData.systems_affected
    if 'show_all' in args and args['show_all']:
        # Include CVEs with zero affected systems.
        join_type = JOIN.RIGHT_OUTER
        cve_count = fn.COALESCE(cve_count, 0)
    query = (CveAccountData.select(
        cve_count.alias("systems_affected"),
        CveMetadata.cve.alias("cve_name"),
        CveMetadata.cvss3_score,
        CveMetadata.cvss2_score,
        CveMetadata.impact_id,
        CveMetadata.public_date,
        CveMetadata.description.alias("cve_description"),
        # Account-level overrides, defaulted to 0 / the default label when unset.
        fn.COALESCE(CveAccountData.business_risk_id, 0).alias('business_risk_id'),
        CveAccountData.business_risk_text.alias('business_risk_text'),
        fn.COALESCE(BusinessRisk.name, DEFAULT_BUSINESS_RISK).alias('business_risk'),
        fn.COALESCE(CveAccountData.status_id, 0).alias('status_id'),
        CveAccountData.status_text.alias('status_text'),
        fn.COALESCE(Status.name, DEFAULT_STATUS).alias('status'),
        fn.COALESCE(CveAccountData.systems_status_divergent, 0).alias('systems_status_divergent'))
        .join(RHAccount, on=(CveAccountData.rh_account_id == RHAccount.id))
        .join(CveMetadata, join_type,
              on=((CveAccountData.cve_id == CveMetadata.id) &
                  (RHAccount.name == query_args["rh_account_number"])))
        .join(BusinessRisk, JOIN.LEFT_OUTER,
              on=(CveAccountData.business_risk_id == BusinessRisk.id))
        .join(Status, JOIN.LEFT_OUTER,
              on=(CveAccountData.status_id == Status.id)))
    if 'show_all' not in args or not args['show_all']:
        # Without show_all, only CVEs actually affecting systems are listed.
        query = query.where(CveAccountData.systems_affected > 0)
    # Optional filters; each is applied only when present and truthy. CVSS
    # bounds use the same cvss3-preferred coalesce as the displayed score.
    if 'cvss_from' in args and args['cvss_from']:
        query = query.where(
            fn.COALESCE(CveMetadata.cvss3_score, CveMetadata.cvss2_score) >= args['cvss_from'])
    if 'cvss_to' in args and args['cvss_to']:
        query = query.where(
            fn.COALESCE(CveMetadata.cvss3_score, CveMetadata.cvss2_score) <= args['cvss_to'])
    if 'public_from' in args and args['public_from']:
        query = query.where(CveMetadata.public_date >= args['public_from'])
    if 'public_to' in args and args['public_to']:
        query = query.where(CveMetadata.public_date <= args['public_to'])
    if 'impact' in args and args['impact']:
        query = query.where(CveMetadata.impact_id << args['impact'])
    if 'business_risk_id' in args and args['business_risk_id']:
        query = query.where(
            fn.COALESCE(CveAccountData.business_risk_id, 0) << args['business_risk_id'])
    if 'status_id' in args and args['status_id']:
        query = query.where(
            fn.COALESCE(CveAccountData.status_id, 0) << args['status_id'])
    query = query.dicts()
    # API sort keys mapped to peewee expressions; SQL(...) entries refer to
    # aliases computed in the select list above.
    sortable_columns = {
        "systems_affected": SQL('systems_affected'),
        "id": CveMetadata.id,
        "synopsis": CVE_SYNOPSIS_SORT,
        "public_date": CveMetadata.public_date,
        # This assumes we only show one score, and that cvss3 wins over cvss2
        "cvss_score": Case(None, ((CveMetadata.cvss3_score.is_null(True), CveMetadata.cvss2_score),),
                           CveMetadata.cvss3_score),
        "cvss3_score": CveMetadata.cvss3_score,
        "cvss2_score": CveMetadata.cvss2_score,
        "impact_id": CveMetadata.impact_id,
        "impact": CveMetadata.impact_id,
        "business_risk_id": SQL('business_risk_id'),
        "business_risk": SQL('business_risk_id'),
        "status_id": SQL('status_id'),
        "status": SQL('status_id'),
    }
    default_sort_columns = ['id']
    filterable_columns = {
        "synopsis": CveMetadata.cve,
        "description": CveMetadata.description
    }
    super(CvesListView, self).__init__(query, sortable_columns, default_sort_columns,
                                       filterable_columns, list_args, args, uri)
def __init__(self, list_args, query_args, filter_args, parsed_args, uri):
    """Set up the per-system CVE view with Insights-rule enrichment.

    Lists CVEs of the system identified by ``query_args['inventory_id']`` in
    ``query_args['rh_account_number']``: unmitigated ones, plus mitigated ones
    whose Insights rule is still active. Opted-out systems are excluded.
    """
    # pylint: disable=singleton-comparison
    query = (
        SystemVulnerabilities.select(
            CveMetadata.cve.alias('cve_name'),
            CveMetadata.cvss3_score,
            CveMetadata.cvss2_score,
            CveImpact.name.alias('impact'),
            CveMetadata.public_date,
            CveMetadata.description.alias('cve_description'),
            # Account-level status falls back to 0 when unset.
            fn.COALESCE(CveAccountData.status_id, 0).alias('cve_status_id'),
            Status.id.alias('status_id'),
            Status.name.alias('status_name'),
            SystemVulnerabilities.status_text.alias('status_text'),
            fn.COALESCE(CveAccountData.business_risk_id, 0).alias('business_risk_id'),
            CveAccountData.business_risk_text.alias('business_risk_text'),
            fn.COALESCE(BusinessRisk.name, DEFAULT_BUSINESS_RISK).alias('business_risk'),
            # Insights rule details (NULL columns when no rule is attached).
            InsightsRule.name.alias('rule_id'),
            InsightsRule.description_text,
            InsightsRule.summary_text,
            InsightsRule.generic_text,
            InsightsRule.reboot_required,
            InsightsRule.playbook_count,
            InsightsRule.change_risk,
            InsightsRule.kbase_node_id,
            InsightsRule.active.alias('rule_active'))
        .join(SystemPlatform, on=(SystemVulnerabilities.system_id == SystemPlatform.id))
        .join(CveMetadata, on=(SystemVulnerabilities.cve_id == CveMetadata.id))
        .join(CveImpact, on=(CveMetadata.impact_id == CveImpact.id))
        .join(Status, on=(SystemVulnerabilities.status_id == Status.id))
        .join(RHAccount, on=(SystemPlatform.rh_account_id == RHAccount.id))
        .join(CveAccountData, JOIN.LEFT_OUTER,
              on=((CveAccountData.cve_id == CveMetadata.id) &
                  (CveAccountData.rh_account_id == RHAccount.id)))
        .join(BusinessRisk, JOIN.LEFT_OUTER,
              on=(CveAccountData.business_risk_id == BusinessRisk.id))
        .join(InsightsRule, JOIN.LEFT_OUTER,
              on=(InsightsRule.id == SystemVulnerabilities.rule_id))
        # Unmitigated, or mitigated-but-rule-still-active.
        .where((SystemVulnerabilities.when_mitigated.is_null(True)) |
               (InsightsRule.active == True))
        .where(RHAccount.name == query_args['rh_account_number'])
        .where(SystemPlatform.inventory_id == query_args['inventory_id'])
        .where(SystemPlatform.opt_out == False)  # noqa: E712
    )
    # Optional filters; each is applied only when present and truthy. CVSS
    # bounds use the cvss3-preferred coalesce, matching the displayed score.
    if 'cvss_from' in filter_args and filter_args['cvss_from']:
        query = query.where(
            fn.COALESCE(CveMetadata.cvss3_score, CveMetadata.cvss2_score) >= filter_args['cvss_from'])
    if 'cvss_to' in filter_args and filter_args['cvss_to']:
        query = query.where(
            fn.COALESCE(CveMetadata.cvss3_score, CveMetadata.cvss2_score) <= filter_args['cvss_to'])
    if 'public_from' in filter_args and filter_args['public_from']:
        query = query.where(CveMetadata.public_date >= filter_args['public_from'])
    if 'public_to' in filter_args and filter_args['public_to']:
        query = query.where(CveMetadata.public_date <= filter_args['public_to'])
    if 'status_id' in filter_args and filter_args['status_id']:
        query = query.where(Status.id << filter_args['status_id'])
    if 'impact' in filter_args and filter_args['impact']:
        query = query.where(CveMetadata.impact_id << filter_args['impact'])
    if 'business_risk_id' in filter_args and filter_args['business_risk_id']:
        query = query.where(
            fn.COALESCE(CveAccountData.business_risk_id, 0) << filter_args['business_risk_id'])
    if 'stale' in filter_args and filter_args['stale']:
        query = query.where(SystemPlatform.stale == filter_args['stale'])
    else:
        # By default stale systems are hidden.
        query = query.where(SystemPlatform.stale == False)  # noqa: E712
    if 'security_rule' in filter_args and filter_args['security_rule'] is not None:
        # Tri-state: True -> only rule-backed CVEs; False -> only rule-less ones.
        if filter_args['security_rule']:
            query = query.where(InsightsRule.active == True)  # noqa: E712
        else:
            query = query.where(
                (InsightsRule.active == False) |
                (InsightsRule.active == None))  # noqa: E712
    query = query.dicts()
    # API sort keys mapped to peewee expressions; SQL(...) entries refer to
    # aliases computed in the select list above.
    sortable_columns = {
        'id': CveMetadata.id,
        'cve': CVE_SYNOPSIS_SORT,
        'synopsis': CVE_SYNOPSIS_SORT,
        'public_date': CveMetadata.public_date,
        # This assumes we only show one score, and that cvss3 wins over cvss2
        'cvss_score': Case(None, ((CveMetadata.cvss3_score.is_null(True), CveMetadata.cvss2_score),),
                           CveMetadata.cvss3_score),
        "cvss3_score": CveMetadata.cvss3_score,
        "cvss2_score": CveMetadata.cvss2_score,
        'impact_id': CveMetadata.impact_id,
        'impact': CveMetadata.impact_id,
        'status_id': Status.id,
        'status': Status.id,
        'business_risk_id': SQL('business_risk_id'),
        'business_risk': SQL('business_risk_id'),
    }
    default_sort_columns = ['id']
    filterable_columns = {
        'cve': CveMetadata.cve,
        'description': CveMetadata.description,
        'status': Status.name
    }
    super(SystemCvesView, self).__init__(query, sortable_columns, default_sort_columns,
                                         filterable_columns, list_args, parsed_args, uri)
def search(cls, term, fields=()):
    """Search the table for records matching ``term`` using SQL ``LIKE``.

    Results are ordered by their relevance to the search term. For a
    target string ``exactly``, each field is ranked on three criteria,
    in order:

    1. Straight equality (``x = 'exactly'``)
    2. Prefix ``LIKE`` (``x LIKE 'exact%'``)
    3. Substring ``LIKE`` (``x LIKE '%act%'``)

    Args:
        term (str): The search term to apply to the query.

    Keyword Args:
        fields (list|tuple|None): Optional fields to search. When not
            provided, the class variable ``Meta.search_fields`` is used.

    Returns:
        peewee.SelectQuery: An unexecuted query for the records.

    Raises:
        AttributeError: If neither ``Meta.search_fields`` nor the
            ``fields`` kwarg supplies any field names, or if a named
            field does not exist on the class.
    """
    if not any((cls._meta.search_fields, fields)):
        raise AttributeError(
            "A list of searchable fields must be provided in the class's "
            "search_fields or provided to this function in the `fields` "
            "kwarg.")

    # Fall back on the class-level search fields when none were supplied.
    fields = fields or cls._meta.search_fields

    # Build the LIKE patterns once; they are reused for every field.
    prefix_pattern = term + '%'
    substring_pattern = '%' + term + '%'

    # Resolve the field names up front; getattr raises AttributeError
    # for any name that does not exist on the class.
    columns = [getattr(cls, field_name) for field_name in fields]

    # One OR-able match clause per field. `**` is peewee's
    # case-insensitive LIKE operator.
    match_clauses = [
        (column == term) | (column ** prefix_pattern) | (column ** substring_pattern)
        for column in columns
    ]

    # Rank each field so exact matches sort first, prefix matches second,
    # substring matches third, and everything else last.
    relevance_order = [
        Case(
            None,
            (
                (column == term, 0),
                (column ** prefix_pattern, 1),
                (column ** substring_pattern, 2),
            ),
            default=3).asc()
        for column in columns
    ]

    query = cls.select().where(reduce(operator.or_, match_clauses))
    return query.order_by(*relevance_order)
def audit_evaluated_metric(event):
    """Collate per-criterion statistics for each audit referenced in ``event``.

    For every SQS message in the batch, recomputes the parent
    ``AccountAudit`` counters (processed/passed/failed criteria, failed
    resources) from its ``AuditCriterion`` records. When every active
    criterion has been attempted, the audit is marked finished, the
    ``AccountLatestAudit`` pointer is upserted, and a completion payload
    (audit + criteria + failed resources) is sent to the completed-audit
    queue.

    :param event: iterable of SQS messages whose JSON bodies contain
        ``account_audit_id.id``
    :return: bool — NOTE(review): never set True anywhere in this
        function, so callers always receive False; confirm whether it
        should flip on success.
    """
    status = False
    try:
        sqs = GdsSqsClient(app)
        for message in event:
            audit_criteria_data = json.loads(message.body)
            audit = models.AccountAudit.get_by_id(
                audit_criteria_data["account_audit_id"]["id"])

            # Processed = count where processed = True
            processed_case = Case(None, [(models.AuditCriterion.processed, 1)], 0)
            # Attempted = count where attempted = True
            attempted_case = Case(None, [(models.AuditCriterion.attempted, 1)], 0)
            # Count where failed resources > 0
            failed_case = Case(None, [(models.AuditCriterion.failed > 0, 1)], 0)

            # Collate stats from audit criteria records in a single query.
            stats = (models.AuditCriterion.select(
                fn.COUNT(models.AuditCriterion.id).alias("active_criteria"),
                fn.SUM(attempted_case).alias("attempted_criteria"),
                fn.SUM(processed_case).alias("processed_criteria"),
                fn.SUM(failed_case).alias("failed_criteria"),
                fn.SUM(models.AuditCriterion.failed).alias("failed_resources"),
            ).where(models.AuditCriterion.account_audit_id == audit).get())

            app.log.debug((f"Processed: {stats.processed_criteria} "
                           f"Failed checks: {stats.failed_criteria} "
                           f"Failed resources: {stats.failed_resources}"))

            audit.criteria_processed = stats.processed_criteria
            audit.criteria_passed = stats.processed_criteria - stats.failed_criteria
            audit.criteria_failed = stats.failed_criteria
            audit.issues_found = stats.failed_resources
            # The audit is complete once every active criterion was attempted.
            audit.finished = stats.active_criteria == stats.attempted_criteria

            if audit.finished:
                audit.date_completed = datetime.now()

                # Build the completion payload: audit + criteria + failures.
                message_data = audit.serialize()
                audit_criteria = (models.AuditCriterion.select().join(
                    models.AccountAudit).where(
                        models.AccountAudit.id == audit.id))
                message_data["criteria"] = [
                    criteria.serialize() for criteria in audit_criteria
                ]

                failed_resources = (models.ResourceCompliance.select().join(
                    models.AuditResource).join(models.AccountAudit).where(
                        models.ResourceCompliance.status_id == 3,
                        models.AccountAudit.id == audit.id,
                    ))
                message_data["failed_resources"] = [
                    resource.serialize() for resource in failed_resources
                ]

                # create SQS message
                queue_url = sqs.get_queue_url(
                    f"{app.prefix}-completed-audit-queue")
                app.log.debug("Retrieved queue url: " + queue_url)
                message_body = app.utilities.to_json(message_data)

                # Upsert the AccountLatestAudit pointer for this subscription.
                try:
                    latest = models.AccountLatestAudit.get(
                        models.AccountLatestAudit.account_subscription_id ==
                        audit.account_subscription_id)
                    latest.account_audit_id = audit
                    latest.save()
                except models.AccountLatestAudit.DoesNotExist:
                    latest = models.AccountLatestAudit.create(
                        account_subscription_id=audit.account_subscription_id,
                        account_audit_id=audit,
                    )
                app.log.debug("latest_audit: " +
                              app.utilities.to_json(latest.serialize()))

                # Return value of send_message was unused; drop the
                # assignment (resolves the old "unnecessary assignment" TODO).
                sqs.send_message(queue_url, message_body)

            audit.save()
    except Exception as err:
        # Best-effort batch consumer: log and fall through so one bad
        # message does not crash the handler.
        app.log.error(str(err))
    return status
def __init__(self, rh_account_id, cve_cache_from, cve_cache_keepalive, list_args, uri, args):
    """Build the paginated/sortable CVE-list query for one RH account.

    :param rh_account_id: primary key of the account being queried
    :param cve_cache_from: truthy marker that a per-account CVE cache
        exists (enables the cached count subquery)
    :param cve_cache_keepalive: keepalive value forwarded to the cache
    :param list_args: pagination/sort/filter arguments for the list view
    :param uri: request URI, forwarded to the base list-view class
    :param args: parsed query arguments ('affecting', system filters, ...)
    """
    # NOTE(review): presumably LEFT OUTER keeps CVEs with no affected
    # systems when 'affecting' does not demand True — confirm semantics.
    join_type = JOIN.INNER
    if args['affecting'] is None or False in args['affecting'] or True not in args['affecting']:
        join_type = JOIN.LEFT_OUTER

    # API using cache, set keepalive for account to enable maintaining cache
    update_cve_cache_keepalive(rh_account_id, cve_cache_keepalive)

    # Use cache if not disabled + systems are not filtered + cache exists
    if not DISABLE_ACCOUNT_CACHE and not is_cyndi_request(args) and cve_cache_from:
        count_subquery = self._cached_count_subquery(rh_account_id)
    else:
        count_subquery = self._count_subquery(rh_account_id)
        count_subquery = cyndi_join(count_subquery)
        if is_cyndi_request(args):
            # Inventory-level (system) filters only apply to the live count.
            count_subquery = apply_filters(count_subquery, args, [
                filter_types.SYSTEM_TAGS, filter_types.SYSTEM_SAP,
                filter_types.SYSTEM_SAP_SIDS, filter_types.SYSTEM_RHEL_VERSION
            ], {})

    query = self._full_query(rh_account_id, join_type, count_subquery)
    # CVE-level filters are applied to the outer query.
    query = apply_filters(query, args, [
        filter_types.CVE_BUSINESS_RISK, filter_types.CVE_CVSS,
        filter_types.CVE_IMPACT, filter_types.CVE_PUBLIC_DATE,
        filter_types.CVE_RULE_PRESENCE, filter_types.CVE_STATUS,
        filter_types.CVE_AFFECTING, filter_types.CVE_KNOWN_EXPLOITS
    ], {"count_subquery": count_subquery})
    query = query.dicts()

    # Maps public sort keys to the expressions they sort on. SQL('...')
    # entries reference aliases computed elsewhere in the query.
    sortable_columns = {
        "systems_affected": SQL('systems_affected'),
        "id": CveMetadata.id,
        "synopsis": CVE_SYNOPSIS_SORT,
        "public_date": CveMetadata.public_date,
        # This assumes we only show one score, and that cvss3 wins over cvss2
        "cvss_score": Case(None, ((CveMetadata.cvss3_score.is_null(True), CveMetadata.cvss2_score),),
                           CveMetadata.cvss3_score),
        "cvss3_score": CveMetadata.cvss3_score,
        "cvss2_score": CveMetadata.cvss2_score,
        "impact_id": CveMetadata.impact_id,
        "impact": CveMetadata.impact_id,
        "business_risk_id": SQL('business_risk_id'),
        "business_risk": SQL('business_risk_id'),
        "status_id": SQL('status_id'),
        "status": SQL('status_id'),
    }
    # NOTE(review): appears to map each primary sort key to a secondary
    # (tie-breaker) column, with 'default' used when none is requested —
    # confirm against the base list-view class.
    default_sort_columns = {
        'default': 'id',
        'cvss_score': 'public_date',
        'cvss2_score': 'public_date',
        'cvss3_score': 'public_date',
        'public_date': 'synopsis',
        'impact': 'public_date',
        'systems_affected': 'public_date'
    }
    filterable_columns = {
        "synopsis": CveMetadata.cve,
        "description": CveMetadata.description,
    }

    if list_args["filter"]:
        # Also match the free-text filter against the descriptions of
        # security rules mapped to each CVE.
        filter_expressions = {
            "rule_description":
                fn.Exists(
                    CveRuleMapping.select(CveRuleMapping.cve_id).join(
                        InsightsRule,
                        on=(CveRuleMapping.rule_id == InsightsRule.id)).where(
                            CveRuleMapping.cve_id == CveMetadata.id).where(
                                InsightsRule.description_text.contains(
                                    list_args["filter"])))
        }
    else:
        filter_expressions = {}

    super().__init__(query, sortable_columns, default_sort_columns,
                     filterable_columns, filter_expressions, list_args, args, uri)
def sequential(*included_interviews, included_properties=None, client_as_numeric=True):
    """
    datasets.sequential(*included_interviews, included_properties) -> pandas.DataFrame
    Builds a sequential dataset including those interviews specified in included_interviews
    and the properties specified in included_properties
    :param included_interviews: sequence of interviews to be included in the dataset
    :param included_properties: sequence of CodingProperty whose data is to be included
        (can be ID as well). None (the default) is treated as "no properties",
        yielding a dataset with no code columns
    :param client_as_numeric: whether to cast client_id to a numeric type. Default True
    :return: pandas.DataFrame
    """
    # No need to include properties twice. A None value previously crashed
    # on sorted(set(None)); treat it as an empty selection instead.
    included_properties = sorted(set(included_properties)) if included_properties is not None else []

    property_ctes = []
    display_names = []
    property_cases = []
    cast_columns = []

    # Base query from which one CTE per requested property is derived.
    property_query = UtteranceCode.select(UtteranceCode.utterance_id, PropertyValue.pv_value,
                                          CodingProperty.cp_data_type) \
        .join(PropertyValue) \
        .join(CodingProperty)

    # Sentinels let NULL codes survive the LEFT JOIN, then get replaced
    # with NaN/'' in the final DataFrame.
    STR_MSNG = '-999999999999999'
    NUM_MSNG = -999999999999999

    # May want the client_id cast as numeric.
    client_column = Cast(Interview.client_id, "INT").alias('client_id') if client_as_numeric \
        else Interview.client_id

    # The dataset construction needs to be atomic to avoid race conditions
    # (the unused `as transaction` binding was dropped).
    with UtteranceCode._meta.database.atomic():
        # Having the display name will be required for giving columns useful names, so fetch them
        cp_dict = {
            itm[0]: (itm[1], itm[2],)
            for itm in CodingProperty.select(
                CodingProperty.coding_property_id, CodingProperty.cp_display_name,
                CodingProperty.cp_data_type).where(
                    CodingProperty.coding_property_id.in_(included_properties)).tuples().execute()
        }

        # Need a CTE for each property whose data is to be included, so construct queries and convert to CTE
        # Need to conditionally create a CAST expression as well because some properties are Numeric, some are STR
        for prop_pk in included_properties:
            cp_display_name, cp_data_type = cp_dict.get(int(prop_pk), (None, None))

            if cp_display_name is None:
                logging.warning(
                    f"CodingProperty with id of {prop_pk} not found. This data will not be included..."
                )
                continue

            if cp_data_type == 'numeric':
                cast_columns.append(cp_display_name)

            cte = property_query.where(PropertyValue.coding_property_id == prop_pk) \
                .cte(f"cte_{cp_display_name}", columns=['utterance_id', cp_display_name, 'cp_data_type'])
            data_field = getattr(cte.c, cp_display_name)
            property_ctes.append(cte)

            # Substitute the sentinel for NULL so missing codes are recognizable.
            pc = Case(None, ((data_field.is_null(), STR_MSNG), ), data_field)
            property_cases.append(pc)
            display_names.append(cp_display_name)

        # The outer query will select the Utterances of the interview.
        # Any CTE will match on the Utterance.utterance_id field and insert the appropriate fields with codes.
        # The outer query needs to include the fields of the CTE as well, so start there.
        basic_query = Interview.select(
            Interview.interview_name, Interview.rater_id, client_column, Interview.session_number,
            Utterance.utt_line, Utterance.utt_enum, Utterance.utt_role,
            *(Cast(pc, "FLOAT").alias(name) if name in cast_columns else pc.alias(name)
              for name, pc in zip(display_names, property_cases)),
            Utterance.utt_text, Utterance.utt_start_time, Utterance.utt_end_time) \
            .join(Utterance)

        # Once the basic query is constructed, the joins need to be added into the query
        # so that the fields of the CTE can be queried properly
        for name, cte in zip(display_names, property_ctes):
            basic_query = basic_query.join(
                cte, JOIN.LEFT_OUTER, on=(Utterance.utterance_id == cte.c.utterance_id))

        # Final step of query preparation is to add in the CTE themselves and narrow the results
        basic_query = basic_query.with_cte(*property_ctes)
        basic_query = basic_query.where(Interview.interview_name.in_(included_interviews)) \
            .order_by(client_column, Interview.session_number, Utterance.utt_enum)

        results = basic_query.tuples().execute()
        columns = [itm[0] for itm in results.cursor.description]
        df = pandas.DataFrame(data=results, columns=columns).replace([NUM_MSNG, STR_MSNG], [NaN, ''])

    return df
def session_level(included_interviews=None, included_properties=None,
                  included_globals=None, client_as_numeric=True):
    """
    session_level(interview_names) -> pandas.DataFrame
    Builds a session-level DataFrame with counts for interviews named in interview_names
    :param included_interviews: iterable of Interview.interview_names to be included in the Dataset
    :param included_properties: iterable of CodingProperty.coding_property_id to be included
    :param included_globals: iterable of GlobalProperty.global_property_id to be included
    :param client_as_numeric: Whether to cast client_id as a numeric variable. Default True
    :return: pandas.DataFrame
    """
    # may want the client_id cast as numeric
    client_column = Cast(Interview.client_id, "INT").alias('client_id') if client_as_numeric \
        else Interview.client_id
    # Variable name is "<property display name>_<property value>".
    var_column = CodingProperty.cp_display_name.concat("_").concat(
        PropertyValue.pv_value).alias('property')

    # May want only certain interviews included or certain properties included,
    # so construct some predicates for where clauses, if necessary.
    # (p1/p2/p3 are lazy SQL expressions; they are only attached to a query
    # below when the corresponding argument was actually provided.)
    p1 = Interview.interview_name.in_(included_interviews)
    p2 = (CodingProperty.coding_property_id.in_(included_properties))
    p3 = (GlobalProperty.global_property_id.in_(included_globals))

    predicate = ((p1) & (p2)) if included_interviews is not None and included_properties is not None else \
        (p1) if included_interviews is not None else \
        (p2) if included_properties is not None else \
        None
    global_predicate = ((p1) & (p3)) if included_interviews is not None and included_globals is not None else \
        (p1) if included_interviews is not None else \
        (p3) if included_globals is not None else \
        None

    # The most difficult part about building the count data is constructing the query.
    # Construct it by parts to make the job simpler.
    # For a session-level dataset, we want counts of all codes assigned, but we also want scores for all
    # session-level globals. Thus, there will need to be a UNION ALL of counts and global ratings.

    # Below constructs the global ratings part of the UNION ALL
    global_query = (GlobalRating.select(
        GlobalRating.interview_id, GlobalProperty.gp_name,
        Cast(GlobalValue.gv_value, "INT"),
        GlobalValue.global_property_id).join(GlobalValue).join(
            GlobalProperty, JOIN.LEFT_OUTER))

    global_cte = global_query.cte(
        "global_cte",
        columns=['interview_id', 'gp_name', 'gv_value', 'global_property_id'])

    outer_global_query = (Interview.select(
        Interview.interview_name, client_column, Interview.rater_id,
        Interview.session_number, GlobalProperty.gp_name,
        global_cte.c.gv_value).join(CodingSystem).join(GlobalProperty))

    full_global_query = outer_global_query.join(
        global_cte, JOIN.LEFT_OUTER,
        on=((Interview.interview_id == global_cte.c.interview_id) &
            (GlobalProperty.global_property_id == global_cte.c.global_property_id)))

    # Append the predicate, if any was specified
    if global_predicate is not None:
        full_global_query = full_global_query.where(global_predicate)

    # Below constructs the code frequency part of the UNION ALL.
    # inner_query is the CTE that selects the existing count data; it is
    # later joined with an outer query over Interview.
    inner_query = (UtteranceCode.select(
        Utterance.interview_id, UtteranceCode.property_value_id,
        fn.COUNT(UtteranceCode.property_value_id)).join(Utterance).group_by(
            Utterance.interview_id, UtteranceCode.property_value_id))

    # The inner query needs to be used as a table expression, so that it can be joined with the outer query properly
    cte = inner_query.cte('cte', columns=('interview_id', 'pvid', 'cnt'))

    # We want to enter zero when the result of the join is NULL
    # (Null indicates that a count for a PropertyValue was zero
    # because there is no related record in the UtteranceCode table having the specified PropertyValue)
    case = Case(None, ((cte.c.cnt.is_null(), 0), ), (cte.c.cnt))

    outer_query = (Interview.select(
        Interview.interview_name, client_column, Interview.rater_id,
        Interview.session_number, var_column,
        case.alias('var_count')).join(CodingSystem).join(CodingProperty).join(
            PropertyValue))

    # Perform the joins on the CTE and do the union all for the final query
    full_query = (outer_query.join(
        cte, JOIN.LEFT_OUTER,
        on=((PropertyValue.property_value_id == cte.c.pvid) &
            (Interview.interview_id == cte.c.interview_id))).with_cte(
                cte, global_cte))

    if predicate is not None:
        full_query = full_query.where(predicate)

    full_query = (full_query.union_all(full_global_query).order_by(
        client_column, Interview.session_number, Interview.rater_id,
        var_column))

    # pull the query results into a dataframe, then reshape it
    # Some DBMS lack the pivot function so reshaping the DataFrame itself rather than the query is necessary
    df = pandas.DataFrame.from_records(data=full_query.tuples().execute(),
                                       columns=[
                                           'interview_name', 'client_id',
                                           'rater_id', 'session_number',
                                           'var_name', 'var_value'
                                       ])
    # Long -> wide: one column per var_name, one row per session.
    df = df.set_index([
        'interview_name', 'client_id', 'rater_id', 'session_number', 'var_name'
    ]).unstack('var_name').loc[:, 'var_value'].reset_index()

    return df
def _execute(self) -> Tuple[List[PopularItemsOutput]]:
    """
    Execute popular items transaction: for the most recent orders in one
    district, report each order's most-ordered item(s).
    (Previous docstring said "new payment transaction" — copy/paste error.)
    :return: relevant output information
    """
    # Get order table joined with customer table: the last
    # `orders_to_examine` orders of this warehouse/district, with the
    # ordering customer's name parts.
    order_customer_query = (Order.select(
        Order.id.alias("order_id"),
        Order.district_id.alias("district_id"),
        Order.warehouse_id.alias("warehouse_id"),
        Order.entry_date.alias("entry_date"),
        Customer.middle_name.alias("middle_name"),
        Customer.first_name.alias("first_name"),
        Customer.last_name.alias("last_name"),
    ).join(
        Customer,
        on=((Order.warehouse_id == Customer.warehouse_id)
            & (Order.district_id == Customer.district_id)
            & (Order.customer_id == Customer.id)),
    ).where((Order.warehouse_id == self.warehouse_id)
            & (Order.district_id == self.district_id)).order_by(
                Order.entry_date.desc()).limit(
                    self.orders_to_examine).cte("order_customer_query"))

    # Get order lines with maximum quantity, joined with item table.
    # Aliased model so the aggregate runs independently of the outer query.
    OrderLineInner: OrderLine = OrderLine.alias()
    # Per-(order, item) total quantity in this warehouse/district.
    order_line_sum_qty_query = (OrderLineInner.select(
        OrderLineInner.warehouse_id.alias("warehouse_id"),
        OrderLineInner.district_id.alias("district_id"),
        OrderLineInner.order_id.alias("order_id"),
        fn.SUM(OrderLineInner.quantity).alias("sum_qty"),
    ).where((OrderLineInner.warehouse_id == self.warehouse_id)
            & (OrderLineInner.district_id == self.district_id)).group_by(
                OrderLineInner.warehouse_id,
                OrderLineInner.district_id,
                OrderLineInner.order_id,
                OrderLineInner.item_id,
            ).cte("order_line_sum_qty_query"))

    # Per-order maximum of those item totals; used below to keep only the
    # most popular item(s) of each order.
    order_line_max_qty_query = (order_line_sum_qty_query.select(
        order_line_sum_qty_query.c.order_id,
        fn.MAX(order_line_sum_qty_query.c.sum_qty),
    ).group_by(
        order_line_sum_qty_query.c.warehouse_id,
        order_line_sum_qty_query.c.district_id,
        order_line_sum_qty_query.c.order_id,
    ).with_cte(order_line_sum_qty_query))

    # "first last" when the middle name is NULL, else all three parts
    # concatenated (no separators in the fallback branch).
    customer_name_field = Case(
        None,
        ((
            order_customer_query.c.middle_name.is_null(),
            fn.CONCAT(
                order_customer_query.c.first_name,
                " ",
                order_customer_query.c.last_name,
            ),
        ), ),
        fn.CONCAT(
            order_customer_query.c.first_name,
            order_customer_query.c.middle_name,
            order_customer_query.c.last_name,
        ),
    ).alias("customer_name")

    # Join order lines back to the examined orders and keep only rows whose
    # (order_id, total quantity) matches the per-order maximum.
    popular_items_query = (OrderLine.select(
        order_customer_query.c.order_id,
        order_customer_query.c.entry_date,
        customer_name_field,
        fn.SUM(OrderLine.quantity).alias("quantity"),
        Item.id.alias("item_id"),
        Item.name.alias("item_name"),
    ).join(
        order_customer_query,
        on=((OrderLine.warehouse_id == order_customer_query.c.warehouse_id)
            & (OrderLine.district_id == order_customer_query.c.district_id)
            & (OrderLine.order_id == order_customer_query.c.order_id)),
    ).join(Item, on=(OrderLine.item_id == Item.id)).group_by(
        order_customer_query.c.order_id,
        order_customer_query.c.entry_date,
        customer_name_field,
        Item.id,
        Item.name,
    ).having(
        DBTuple(order_customer_query.c.order_id,
                fn.SUM(OrderLine.quantity)).in_(order_line_max_qty_query)
    ).order_by(
        order_customer_query.c.order_id.desc(),
        fn.SUM(OrderLine.quantity).desc(),
    ).with_cte(order_customer_query))

    # Process query output
    return ([result for result in popular_items_query.dicts()], )
def handle_get(cls, **kwargs):
    """Return per-account CVE summary counts (exploitable, critical,
    important, and rule-covered CVEs), using the per-account cache when
    possible and falling back to a live aggregate over system
    vulnerabilities otherwise.
    """
    # pylint: disable=singleton-comparison
    args_desc = [{
        'arg_name': 'tags',
        'convert_func': parse_tags
    }, {
        'arg_name': 'sap_system',
        'convert_func': None
    }, {
        'arg_name': 'sap_sids',
        'convert_func': None
    }]
    args = cls._parse_arguments(kwargs, args_desc)
    # NOTE(review): "reuest" is a typo for "request"; harmless since it is
    # used consistently within this method.
    cyndi_reuest = is_cyndi_request(args)
    filters = [
        filter_types.SYSTEM_TAGS, filter_types.SYSTEM_SAP,
        filter_types.SYSTEM_SAP_SIDS
    ]
    rh_account, cve_cache_from, cve_cache_keepalive = get_account_data(
        connexion.context["user"])
    update_cve_cache_keepalive(rh_account, cve_cache_keepalive)

    if not DISABLE_ACCOUNT_CACHE and not cyndi_reuest and cve_cache_from:
        # Cached path: aggregate over the per-account CVE cache. Each
        # Case yields the CVE id only when the condition holds, so
        # COUNT(DISTINCT ...) counts qualifying CVEs.
        query = (CveAccountCache.select(
            fn.Count(
                fn.Distinct(
                    Case(CveMetadata.exploits,
                         ((True, CveMetadata.id), )))).alias("exploitable_cves"),
            fn.Count(
                fn.Distinct(
                    Case(CveMetadata.impact_id,
                         ((7, CveMetadata.id), )))).alias("critical_cves"),
            fn.Count(
                fn.Distinct(
                    Case(CveMetadata.impact_id,
                         ((5, CveMetadata.id), )))).alias("important_cves"),
            fn.Count(
                fn.Distinct(
                    Case(None, ((InsightsRule.id.is_null(False),
                                 CveMetadata.id), )))).alias("cves_with_rule")
        ).join(
            CveMetadata,
            on=(CveAccountCache.cve_id == CveMetadata.id)).join(
                CveRuleMapping, JOIN.LEFT_OUTER,
                on=((CveAccountCache.cve_id == CveRuleMapping.cve_id))).join(
                    InsightsRule, JOIN.LEFT_OUTER,
                    on=((CveRuleMapping.rule_id == InsightsRule.id)
                        & (InsightsRule.active == True)
                        & (~InsightsRule.rule_only))).where(
                            CveAccountCache.rh_account_id == rh_account))
    else:
        # Live path: same aggregates computed from current, unmitigated
        # vulnerabilities of this account's eligible (non-stale, opted-in,
        # non-deleted, non-edge) systems.
        query = (SystemVulnerabilities.select(
            fn.Count(
                fn.Distinct(
                    Case(CveMetadata.exploits,
                         ((True, CveMetadata.id), )))).alias("exploitable_cves"),
            fn.Count(
                fn.Distinct(
                    Case(CveMetadata.impact_id,
                         ((7, CveMetadata.id), )))).alias("critical_cves"),
            fn.Count(
                fn.Distinct(
                    Case(CveMetadata.impact_id,
                         ((5, CveMetadata.id), )))).alias("important_cves"),
            fn.Count(
                fn.Distinct(
                    Case(None, ((InsightsRule.id.is_null(False),
                                 CveMetadata.id), )))).alias("cves_with_rule")
        ).join(
            CveMetadata,
            on=(SystemVulnerabilities.cve_id == CveMetadata.id)).join(
                CveRuleMapping, JOIN.LEFT_OUTER,
                on=((SystemVulnerabilities.cve_id == CveRuleMapping.cve_id))
            ).join(
                InsightsRule, JOIN.LEFT_OUTER,
                on=((CveRuleMapping.rule_id == InsightsRule.id)
                    & (InsightsRule.active == True)
                    & (~InsightsRule.rule_only))
            ).join(
                SystemPlatform,
                on=((SystemVulnerabilities.system_id == SystemPlatform.id)
                    & (SystemPlatform.rh_account_id == rh_account)
                    & (SystemPlatform.opt_out == False)
                    & (SystemPlatform.stale == False)
                    & (SystemPlatform.when_deleted.is_null(True))
                    & (fn.COALESCE(SystemPlatform.host_type, 'null') != HostType.EDGE))
            ).where(
                SystemVulnerabilities.rh_account_id == rh_account).where(
                    SystemVulnerabilities.when_mitigated.is_null(True)))
        if cyndi_reuest:
            # Inventory-backed request: join inventory data and apply
            # tag/SAP filters.
            query = cyndi_join(query)
            query = apply_filters(query, args, filters, {})

    res = query.first()
    return {
        "exploitable_cves": res.exploitable_cves,
        "critical_cves": res.critical_cves,
        "important_cves": res.important_cves,
        "cves_with_rule": res.cves_with_rule
    }