def intersection(geom_a, geom_b):
    """Computes the intersection of two bounding boxes."""
    intersection_score = func.greatest(
        0,
        (func.least(geom_a.x2, geom_b.x2) -
         func.greatest(geom_a.x1, geom_b.x1))) * \
        func.greatest(
            0,
            (func.least(geom_a.y2, geom_b.y2) -
             func.greatest(geom_a.y1, geom_b.y1)))
    return intersection_score
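# Illustrative sketch only: the same overlap computation in plain Python,
# using hypothetical dict-based boxes with the x1/y1/x2/y2 corners assumed
# above. Clamping each axis at 0 with max() mirrors func.greatest(0, ...),
# so disjoint boxes yield an area of 0 rather than a negative product.
def overlap_area(a, b):
    width = max(0, min(a["x2"], b["x2"]) - max(a["x1"], b["x1"]))
    height = max(0, min(a["y2"], b["y2"]) - max(a["y1"], b["y1"]))
    return width * height

# e.g. overlap_area({"x1": 0, "y1": 0, "x2": 4, "y2": 4},
#                   {"x1": 2, "y1": 2, "x2": 6, "y2": 6}) == 4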
def _insert_or_update(self, timestamp, vals, lastseen=None):
    stmt = postgresql.insert(self.tables.passive).values(vals)
    index = [
        'addr', 'sensor', 'recontype', 'port', 'source', 'value',
        'targetval', 'info',
    ]
    upsert = {
        'firstseen': func.least(
            self.tables.passive.firstseen,
            timestamp,
        ),
        'lastseen': func.greatest(
            self.tables.passive.lastseen,
            lastseen or timestamp,
        ),
        'count': self.tables.passive.count + stmt.excluded.count,
    }
    self.db.execute(
        stmt.on_conflict_do_update(
            index_elements=index,
            set_=upsert,
        ))
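# A minimal, self-contained sketch (not part of the code above) of the same
# LEAST/GREATEST upsert pattern, on a hypothetical "sightings" table: on
# conflict, keep the earliest first_seen, the latest last_seen, and
# accumulate the count.
from sqlalchemy import Column, DateTime, Integer, MetaData, String, Table, func
from sqlalchemy.dialects import postgresql

metadata = MetaData()
sightings = Table(
    'sightings', metadata,
    Column('addr', String, primary_key=True),
    Column('first_seen', DateTime, nullable=False),
    Column('last_seen', DateTime, nullable=False),
    Column('count', Integer, nullable=False),
)

def upsert_sighting(conn, addr, seen_at):
    stmt = postgresql.insert(sightings).values(
        addr=addr, first_seen=seen_at, last_seen=seen_at, count=1)
    conn.execute(stmt.on_conflict_do_update(
        index_elements=['addr'],
        set_={
            'first_seen': func.least(sightings.c.first_seen,
                                     stmt.excluded.first_seen),
            'last_seen': func.greatest(sightings.c.last_seen,
                                       stmt.excluded.last_seen),
            'count': sightings.c.count + stmt.excluded.count,
        },
    ))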
def find_matches(dataset, text, filter=None, exclude=None):
    entities = Entity.__table__
    match_text = normalize(text, dataset)[:254]

    # select text column and apply necessary transformations
    text_field = entities.c.name
    if dataset.normalize_text:
        text_field = entities.c.normalized
    if dataset.ignore_case:
        text_field = func.lower(text_field)
    text_field = func.left(text_field, 254)

    # calculate the difference percentage
    l = func.greatest(1.0,
                      func.least(len(match_text), func.length(text_field)))
    score = func.greatest(
        0.0,
        ((l - func.levenshtein(text_field, match_text)) / l) * 100.0)
    score = func.max(score).label('score')

    # coalesce the canonical identifier
    id_ = func.coalesce(entities.c.canonical_id, entities.c.id).label('id')

    # apply filters
    filters = [entities.c.dataset_id == dataset.id,
               entities.c.invalid == False]  # noqa
    if not dataset.match_aliases:
        filters.append(entities.c.canonical_id == None)  # noqa
    if exclude is not None:
        filters.append(entities.c.id != exclude)
    if filter is not None:
        filters.append(text_field.ilike('%%%s%%' % filter))

    q = select([id_, score], and_(*filters), [entities],
               group_by=[id_], order_by=[score.desc()])
    return Matches(q)
def top_10_fields_by_prob():
    a = LocalizationTile
    b = FieldTile
    a_lo = a.nested_lo.label('a_lo')
    a_hi = a.nested_hi.label('a_hi')
    b_lo = b.nested_lo.label('b_lo')
    b_hi = b.nested_hi.label('b_hi')
    query1 = db.session.query(
        a_lo, a_hi, b_lo, b_hi,
        FieldTile.field_id.label('field_id'),
        LocalizationTile.localization_id.label('localization_id'),
        LocalizationTile.probdensity.label('probdensity'))
    query2 = union(
        query1.join(b, a_lo.between(b_lo, b_hi)),
        query1.join(b, b_lo.between(a_lo, a_hi)),
    ).cte()
    lo = func.greatest(query2.c.a_lo, query2.c.b_lo)
    hi = func.least(query2.c.a_hi, query2.c.b_hi)
    area = (hi - lo + 1) * healpix.PIXEL_AREA
    prob = func.sum(query2.c.probdensity * area).label('probability')
    query = db.session.query(
        query2.c.localization_id, query2.c.field_id, prob
    ).group_by(
        query2.c.localization_id, query2.c.field_id
    ).order_by(prob.desc()).limit(10)
    return query.all()
def _insert_or_update(self, timestamp, values, lastseen=None):
    stmt = insert(self.tables.passive)\
        .values(dict(values, addr=utils.force_int2ip(values['addr'])))
    try:
        self.db.execute(stmt)
    except IntegrityError:
        whereclause = and_(
            self.tables.passive.addr == values['addr'],
            self.tables.passive.sensor == values['sensor'],
            self.tables.passive.recontype == values['recontype'],
            self.tables.passive.source == values['source'],
            self.tables.passive.value == values['value'],
            self.tables.passive.targetval == values['targetval'],
            self.tables.passive.info == values['info'],
            self.tables.passive.port == values['port'],
        )
        upsert = {
            'firstseen': func.least(
                self.tables.passive.firstseen,
                timestamp,
            ),
            'lastseen': func.greatest(
                self.tables.passive.lastseen,
                lastseen or timestamp,
            ),
            'count': self.tables.passive.count + values['count'],
        }
        updt = update(
            self.tables.passive).where(whereclause).values(upsert)
        self.db.execute(updt)
def get_observation_nearest_query(args):
    '''Get an observation of the specified feature from the node nearest
    to the provided long, lat coordinates.

    :param args: (ValidatorResult) validated query arguments
    '''
    # TODO(heyzoos)
    # [ ] Test me! Specifically test property filtering.

    lng = args.data['lng']
    lat = args.data['lat']
    feature = args.data['feature']
    network = args.data['network']
    point_dt = args.data['datetime'] if args.data.get('datetime') else datetime.now()
    conditions = args.data.get('filter')

    nearest_nodes_rp = NodeMeta.nearest_neighbor_to(
        lng, lat, network=network.name, features=[feature.name])

    if not nearest_nodes_rp:
        return 'No nodes could be found nearby with your target feature.'

    feature_str = '{}__{}'.format(network.name, feature.name)
    feature = redshift_base.metadata.tables[feature_str]

    result = None
    for row in nearest_nodes_rp:
        query = redshift_session.query(feature).filter(and_(
            feature.c.node_id == row.node,
            feature.c.datetime <= point_dt + timedelta(hours=12),
            feature.c.datetime >= point_dt - timedelta(hours=12)
        ))

        if conditions is not None:
            query = query.filter(conditions)

        query = query.order_by(
            asc(
                # Ensures that the interval value is always positive,
                # since the abs() function doesn't work for intervals
                sqla_fn.greatest(point_dt, feature.c.datetime) -
                sqla_fn.least(point_dt, feature.c.datetime)
            )
        )

        # Magic number 3 because IFTTT tests require at least three results
        result = query.limit(3).all()

        if result is not None:
            break

    if result is None:
        return 'Your feature has not been reported on by the nearest 10 ' \
               'nodes at the time provided.'

    return [format_observation(obs, feature) for obs in result]
def get_band(session, dataset, freq_eff, freq_bw, freq_bw_max=.0):
    """
    Returns the frequency band for the given frequency parameters. Will
    create a new frequency band entry in the database if no match is found.
    You can limit the bandwidth of the band association with freq_bw_max.

    args:
        session (sqlalchemy.orm.session.Session): a SQLAlchemy session object
        dataset (tkp.db.model.Dataset): the TraP dataset
        freq_eff (float): the central frequency of the image to get the band for
        freq_bw (float): the bandwidth of the image to get the band for
        freq_bw_max (float): the maximum bandwidth used for band association.
            Not used if 0.0 (default).

    returns:
        tkp.db.model.Frequencyband: a frequency band object
    """
    if freq_bw_max == .0:
        bw_half = freq_bw / 2
    else:
        bw_half = freq_bw_max / 2
    low = freq_eff - bw_half
    high = freq_eff + bw_half

    # two intervals overlap when the span of their union is smaller than
    # the sum of their individual widths
    w1 = high - low
    w2 = Frequencyband.freq_high - Frequencyband.freq_low
    max_ = func.greatest(high, Frequencyband.freq_high)
    min_ = func.least(low, Frequencyband.freq_low)

    band = session.query(Frequencyband).filter(
        (Frequencyband.dataset == dataset) & (max_ - min_ < w1 + w2)
    ).first()

    if not band:
        # no match so we create a new band
        band = Frequencyband(freq_central=freq_eff, freq_low=low,
                             freq_high=high, dataset=dataset)
        session.add(band)

    return band
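# Illustrative only: the overlap criterion used in the query above, checked
# in plain Python. Two intervals overlap exactly when the span of their
# union (greatest high minus least low) is smaller than the sum of their
# individual widths.
def bands_overlap(low1, high1, low2, high2):
    union_span = max(high1, high2) - min(low1, low2)
    return union_span < (high1 - low1) + (high2 - low2)

# e.g. bands_overlap(140e6, 160e6, 150e6, 170e6) -> True
#      bands_overlap(140e6, 150e6, 160e6, 170e6) -> False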
def decayed_score(score, created_at, peak=5, nominal_timestamp=14 * 24 * 60 * 60):
    """
    Creates a decaying (over time) version of the provided `score`.

    The returned value is `score` times a multiplier determined by `peak`
    and `nominal_timestamp`.

    Args:
        score: (number) The base score to modify
        created_at: (timestamp) The timestamp the score is attributed to
        peak?: (number) The peak multiplier possible

    Returns:
        A SQLAlchemy expression representing the decayed score
        (score * multiplier), where the multiplier is:
        greatest(0.2, peak ^ (1 - least(time_ago / nominal_timestamp, 1)))
    """
    return score * func.greatest(
        func.pow(
            peak,
            1 - func.least(seconds_ago(created_at) / nominal_timestamp, 1)),
        0.2)
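# Worked example (plain Python) of the multiplier built above, with the
# default peak of 5 and a two-week nominal_timestamp:
#   t = 0            -> 5 ** (1 - 0) = 5.0   (fresh items score 5x)
#   t = nominal / 2  -> 5 ** 0.5     ~ 2.24
#   t >= nominal     -> 5 ** 0       = 1.0
# Note that the least() clamp keeps the exponent at or above 0, so the
# greatest(..., 0.2) floor would only come into play without that clamp.
def decay_multiplier(seconds_elapsed, peak=5, nominal=14 * 24 * 60 * 60):
    return max(0.2, peak ** (1 - min(seconds_elapsed / nominal, 1)))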
def find_matches(dataset, text, filter=None, exclude=None):
    entities = Entity.__table__
    match_text = (normalize(text) or '')[:254]

    # select text column and apply necessary transformations
    text_field = entities.c.name
    if dataset.normalize_text:
        text_field = entities.c.normalized
    if dataset.ignore_case:
        text_field = func.lower(text_field)
    text_field = func.left(text_field, 254)

    # calculate the difference percentage
    min_l = func.greatest(1.0,
                          func.least(len(match_text),
                                     func.length(text_field)))
    score = func.greatest(
        0.0,
        ((min_l - func.levenshtein(text_field, match_text)) / min_l) * 100.0)
    score = func.max(score).label('score')

    # coalesce the canonical identifier
    id_ = func.coalesce(entities.c.canonical_id, entities.c.id).label('id')

    # apply filters
    filters = [
        entities.c.dataset_id == dataset.id,
        entities.c.invalid == False  # noqa
    ]
    if not dataset.match_aliases:
        filters.append(entities.c.canonical_id == None)  # noqa
    if exclude is not None:
        filters.append(entities.c.id != exclude)
    if filter is not None:
        filters.append(text_field.ilike('%%%s%%' % filter))

    q = select([id_, score], and_(*filters), [entities],
               group_by=[id_], order_by=[score.desc()])
    return Matches(q)
def find_matches(project, account, text, schemata=[], properties=[]):
    main = aliased(Property)
    ent = aliased(Entity)
    q = db.session.query(main.entity_id)
    q = q.filter(main.name == "name")
    q = q.filter(main.entity_id == ent.id)
    q = q.join(ent)
    q = q.filter(ent.project_id == project.id)

    for schema in schemata:
        obj = aliased(Schema)
        q = q.join(obj, ent.schema_id == obj.id)
        q = q.filter(obj.name == schema)

    for name, value in properties:
        p = aliased(Property)
        q = q.join(p, p.entity_id == ent.id)
        q = q.filter(p.active == True)  # noqa
        q = q.filter(p.name == name)
        attr = project.get_attribute("entity", name)
        column = getattr(p, attr.value_column)
        q = q.filter(column == value)

    # prepare text fields (todo: further normalization!)
    text_field = func.left(func.lower(main.value_string), 254)
    match_text = text.lower().strip()[:254]
    match_text_db = cast(match_text, types.Unicode)

    # calculate the difference percentage
    l = func.greatest(1.0,
                      func.least(len(match_text), func.length(text_field)))
    score = func.greatest(
        0.0,
        ((l - func.levenshtein(text_field, match_text_db)) / l) * 100.0)
    score = score.label("score")

    q = q.add_columns(score)
    q = q.order_by(score.desc())
    q = q.filter(score > 50)
    return Matches(q, account)
def _insert_or_update(self, timestamp, values, lastseen=None,
                      replacecount=False):
    stmt = insert(self.tables.passive).values(
        dict(values, addr=utils.force_int2ip(values["addr"])))
    try:
        self.db.execute(stmt)
    except IntegrityError:
        whereclause = and_(
            self.tables.passive.addr == values["addr"],
            self.tables.passive.sensor == values["sensor"],
            self.tables.passive.recontype == values["recontype"],
            self.tables.passive.source == values["source"],
            self.tables.passive.value == values["value"],
            self.tables.passive.targetval == values["targetval"],
            self.tables.passive.info == values["info"],
            self.tables.passive.port == values["port"],
        )
        upsert = {
            "firstseen": func.least(
                self.tables.passive.firstseen,
                timestamp,
            ),
            "lastseen": func.greatest(
                self.tables.passive.lastseen,
                lastseen or timestamp,
            ),
            "count": (
                values["count"]
                if replacecount
                else self.tables.passive.count + values["count"]
            ),
        }
        updt = update(
            self.tables.passive).where(whereclause).values(upsert)
        self.db.execute(updt)
def find_matches(project, account, text, schemata=[], properties=[]):
    main = aliased(Property)
    ent = aliased(Entity)
    q = db.session.query(main.entity_id)
    q = q.filter(main.name == 'name')
    q = q.filter(main.entity_id == ent.id)
    q = q.join(ent)
    q = q.filter(ent.project_id == project.id)

    if len(schemata):
        obj = aliased(Schema)
        q = q.join(obj, ent.schema_id == obj.id)
        q = q.filter(obj.name.in_(schemata))

    for name, value in properties:
        p = aliased(Property)
        q = q.join(p, p.entity_id == ent.id)
        q = q.filter(p.active == True)  # noqa
        q = q.filter(p.name == name)
        column = getattr(p, p.type_column(value))
        q = q.filter(column == value)

    # prepare text fields (todo: further normalization!)
    text_field = func.left(func.lower(main.value_string), 254)
    match_text = text.lower().strip()[:254]
    match_text_db = cast(match_text, types.Unicode)

    # calculate the difference percentage
    l = func.greatest(1.0,
                      func.least(len(match_text), func.length(text_field)))
    score = func.greatest(
        0.0,
        ((l - func.levenshtein(text_field, match_text_db)) / l) * 100.0)
    score = score.label('score')

    q = q.group_by(main.entity_id)
    q = q.add_columns(func.max(score))
    q = q.order_by(func.max(score).desc())
    q = q.filter(score > 50)
    return Matches(q, project, account)
    'product_code': pct.code,
    'product_name': pct.name,
    'product_type': pt.name,
    'promo': pro.code,
    'qty': oi.qty,
    'reference': o.reference_number,
    'rework_date': r.rework_date,
    'rework_qty': r.qty,
    'rework_reason': rr.name,
    'ship_country': c.name,
    'ship_date': i.ship_date,
    'ship_method': sm.name,
    'state': s.name,
    'theme': t.name,
    'turnaround': select([func.least(func.sum(cal.is_business_day), 7)])
        .where((cal.calendar_date >= func.date(o.order_date)) &
               (cal.calendar_date <= i.ship_date))}

for d in DIMENSIONS:
    DIMENSIONS[d] = DIMENSIONS[d].label(d)

JOINS = {a: a.address_id == o.shipping_address_id,
         act: wi.activity_id == act.activity_id,
         c: a.country == c.country_code,
         can: (can.order_item_id == oi.order_item_id) &
              can.activity_id.in_([42, 43]),
         cu: cu.user_id == can.user_id,
         cur: cur.currency_id == o.currency_id,
         cus: cus.customer_id == o.customer_id,
         cc: oif.cover_color_id == cc.cover_color_id,
         cm: oif.cover_material_id == cm.cover_material_id,
         dwn: pi.download_path_id == dwn.uri_resource_id,
def _least(cls, *args):
    """Produce a "least" SQLAlchemy expression (minimum between values)."""
    # return cls._if(a < b, a, b)
    return func.least(*args)
    primary_key=True, nullable=False, onupdate="CASCADE"),
    Column('created', DateTime(timezone=False),
           default=datetime.utcnow, nullable=False),
    UniqueConstraint('question_parent_id', 'question_child_id',
                     name='question_parent_child_uix')
)

# We want to prevent redundant, bidirectional links in tbl
# e.g.: (question_parent_id, question_child_id) ==
#       (question_child_id, question_parent_id)
# i.e.:
# create unique index on question_to_questions (least(A,B), greatest(A,B));
Index(
    'question_to_questions_uix',
    func.least(question_questions.c.question_parent_id,
               question_questions.c.question_child_id),
    func.greatest(question_questions.c.question_parent_id,
                  question_questions.c.question_child_id),
    unique=True)


class User(core.Base):
    __tablename__ = "users"
    id = Column(BigInteger, primary_key=True)
    username = Column(Unicode, unique=True)
    email = Column(Unicode, unique=True)
    tags_subscriptions = relationship('Tag', 'user_to_tags',
                                      backref="subscribed_users")

    def dict(self, verbose=False, minimal=False):
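# For reference, a sketch of the DDL the Index above is meant to produce on
# PostgreSQL (exact rendering depends on the dialect):
#
#   CREATE UNIQUE INDEX question_to_questions_uix
#   ON question_to_questions (
#       LEAST(question_parent_id, question_child_id),
#       GREATEST(question_parent_id, question_child_id)
#   );
#
# Ordering the pair with LEAST/GREATEST makes (a, b) and (b, a) map to the
# same index entry, so only one direction of a link can ever be stored.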
def fetch_billing_for_all_services():
    breakdown_period = 'quarter'
    free_allowances = fetch_sms_free_allowances().subquery()
    query_cte = dao_get_priced_billing_data().cte()

    # date breakdowns
    breakdown_aet = func.date_trunc(breakdown_period,
                                    query_cte.c.aet_date).cast(Date())
    breakdown_fy = func.date_trunc(breakdown_period,
                                   query_cte.c.fy_date).cast(Date())
    financial_year = func.date_part('year', query_cte.c.fy_date).cast(Integer())

    # calculate in credits in order to work backwards to get available units
    # after removing cost usage
    credits_free = free_allowances.c.free_sms_fragment_limit * FRAGMENT_UNIT_RATE

    # coalesce and sum these totals by month
    cost = func.sum(query_cte.c.cost)
    notifications = func.sum(query_cte.c.notifications_sent)
    notifications_email = func.sum(query_cte.c.notifications_sent).filter(
        query_cte.c.notification_type == EMAIL_TYPE)
    notifications_sms = func.sum(query_cte.c.notifications_sent).filter(
        query_cte.c.notification_type == SMS_TYPE)
    fragments_domestic = func.sum(query_cte.c.domestic_units)
    fragments_international = func.sum(query_cte.c.international_units)

    # cumulative figures for entire year
    cost_cumulative = func.sum(cost).over(
        order_by=breakdown_fy,
        partition_by=(Service.id, financial_year))
    cost_starting = cost_cumulative - cost
    credits_remaining = func.greatest(credits_free - cost_cumulative, 0)
    credits_available = func.greatest(credits_free - cost_starting, 0)
    cost_chargeable = func.greatest(cost - credits_available, 0)
    cost_chargeable_cumulative = func.greatest(cost_cumulative - credits_free, 0)
    credits_used = func.least(cost, credits_available)

    units = cost / FRAGMENT_UNIT_RATE
    units_cumulative = cost_cumulative / FRAGMENT_UNIT_RATE
    units_chargeable = cost_chargeable / FRAGMENT_UNIT_RATE
    units_free_used = credits_used / FRAGMENT_UNIT_RATE
    units_free_available = credits_available / FRAGMENT_UNIT_RATE
    units_free_remaining = credits_remaining / FRAGMENT_UNIT_RATE
    units_chargeable_cumulative = cost_chargeable_cumulative / FRAGMENT_UNIT_RATE

    qry = db.session.query(
        Service.id.label('service_id'),
        Service.name.label('service_name'),
        breakdown_aet.label('breakdown_aet'),
        breakdown_fy.label('breakdown_fy'),
        func.date_part('year', breakdown_fy).cast(
            Integer()).label('breakdown_fy_year'),
        func.date_part('quarter', breakdown_fy).cast(
            Integer()).label('breakdown_fy_quarter'),
        free_allowances.c.free_sms_fragment_limit.label(
            'fragments_free_limit'),
        fragments_domestic.label('fragments_domestic'),
        fragments_international.label('fragments_international'),
        notifications.label('notifications'),
        notifications_sms.label('notifications_sms'),
        notifications_email.label('notifications_email'),
        cost.label('cost'),
        cost_chargeable.label('cost_chargeable'),
        cost_cumulative.label('cost_cumulative'),
        cost_chargeable_cumulative.label('cost_chargeable_cumulative'),
        units.label('units'),
        units_cumulative.label('units_cumulative'),
        units_chargeable.label('units_chargeable'),
        units_chargeable_cumulative.label('units_chargeable_cumulative'),
        units_free_available.label('units_free_available'),
        units_free_remaining.label('units_free_remaining'),
        units_free_used.label('units_free_used'),
        literal(DOMESTIC_UNIT_RATE * 100).label('unit_rate_domestic'),
        literal(INTERNATIONAL_UNIT_RATE * 100).label('unit_rate_international'),
    ).select_from(
        Service,
    ).join(
        query_cte,
        query_cte.c.service_id == Service.id,
    ).join(
        free_allowances,
        and_(
            Service.id == free_allowances.c.service_id,
            financial_year == free_allowances.c.financial_year_start,
        ),
    ).group_by(
        Service.id,
        Service.name,
        financial_year,
        breakdown_aet,
        breakdown_fy,
        free_allowances.c.free_sms_fragment_limit,
    ).order_by(
        Service.name,
        breakdown_fy,
    )

    return qry.all()
def get_direct_messages_previews(cls, user_id, team_id):
    chats = db.session.query(ChatTableEntry).filter(
        ChatTableEntry.user_id == user_id).subquery("sq1")

    last_messages_mixed = db.session.query(
        func.least(MessageTableEntry.sender_id,
                   MessageTableEntry.receiver_id).label("user1"),
        func.greatest(MessageTableEntry.sender_id,
                      MessageTableEntry.receiver_id).label("user2"),
        func.max(MessageTableEntry.timestamp).label("maxtimestamp")
    ).filter(and_(
        or_(MessageTableEntry.receiver_id == user_id,
            MessageTableEntry.sender_id == user_id),
        MessageTableEntry.team_id == team_id,
        MessageTableEntry.send_type == SendMessageType.DIRECT.value
    )).group_by(
        func.least(MessageTableEntry.sender_id,
                   MessageTableEntry.receiver_id),
        func.greatest(MessageTableEntry.sender_id,
                      MessageTableEntry.receiver_id)
    ).subquery("sq2")

    last_messages = db.session.query(
        MessageTableEntry.message_id,
        MessageTableEntry.sender_id,
        MessageTableEntry.receiver_id,
        UserTableEntry.username,
        UserTableEntry.first_name,
        UserTableEntry.last_name,
        UserTableEntry.profile_pic,
        UserTableEntry.online,
        BotTableEntry.bot_name,
        MessageTableEntry.content,
        MessageTableEntry.message_type,
        MessageTableEntry.timestamp,
        UserTableEntry.user_id.label("is_user"),
        chats.c.unseen
    ).join(
        last_messages_mixed,
        and_(
            or_(
                MessageTableEntry.sender_id == last_messages_mixed.c.user1,
                MessageTableEntry.sender_id == last_messages_mixed.c.user2,
            ),
            or_(
                MessageTableEntry.receiver_id == last_messages_mixed.c.user1,
                MessageTableEntry.receiver_id == last_messages_mixed.c.user2,
            ),
            MessageTableEntry.timestamp == last_messages_mixed.c.maxtimestamp,
            MessageTableEntry.send_type == SendMessageType.DIRECT.value)
    ).join(
        chats,
        and_(
            or_(
                MessageTableEntry.sender_id == chats.c.chat_id,
                MessageTableEntry.receiver_id == chats.c.chat_id,
            ),
            or_(
                MessageTableEntry.sender_id == chats.c.user_id,
                MessageTableEntry.receiver_id == chats.c.user_id,
            ))
    ).outerjoin(
        UserTableEntry,
        or_(
            and_(UserTableEntry.user_id == last_messages_mixed.c.user1,
                 UserTableEntry.user_id != user_id),
            and_(UserTableEntry.user_id == last_messages_mixed.c.user2,
                 UserTableEntry.user_id != user_id))
    ).outerjoin(
        BotTableEntry,
        or_(
            and_(BotTableEntry.bot_id == last_messages_mixed.c.user1,
                 BotTableEntry.bot_id != user_id),
            and_(BotTableEntry.bot_id == last_messages_mixed.c.user2,
                 BotTableEntry.bot_id != user_id))).all()

    return MessageModelMapper.to_direct_messages_previews(last_messages)
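# The least()/greatest() pair above canonicalizes an unordered user pair: a
# conversation between users 7 and 3 is always grouped as (3, 7) no matter
# who sent each message. A minimal sketch of that idea in isolation:
from sqlalchemy import func

def conversation_key(sender_id_col, receiver_id_col):
    """Return (user1, user2) labels with user1 <= user2."""
    return (func.least(sender_id_col, receiver_id_col).label("user1"),
            func.greatest(sender_id_col, receiver_id_col).label("user2"))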
def topvalues(self, field, flt=None, topnbr=10, sort=None, limit=None,
              skip=None, least=False):
    """
    This method makes use of the aggregation framework to produce top
    values for a given field or pseudo-field. Pseudo-fields are:
      - category / label / asnum / country / net[:mask]
      - port
      - port:open / :closed / :filtered / :<servicename>
      - portlist:open / :closed / :filtered
      - countports:open / :closed / :filtered
      - service / service:<portnbr>
      - product / product:<portnbr>
      - cpe / cpe.<part> / cpe:<cpe_spec> / cpe.<part>:<cpe_spec>
      - devicetype / devicetype:<portnbr>
      - script:<scriptid> / script:<port>:<scriptid> / script:host:<scriptid>
      - cert.* / smb.* / sshkey.*
      - httphdr / httphdr.{name,value} / httphdr:<name>
      - modbus.* / s7.* / enip.*
      - mongo.dbs.*
      - vulns.*
      - screenwords
      - file.* / file.*:scriptid
      - hop
    """
    if flt is None:
        flt = self.flt_empty
    base = flt.query(
        select([self.tables.scan.id])
        .select_from(flt.select_from)).cte("base")
    order = "count" if least else desc("count")
    outputproc = None
    if field == "port":
        field = self._topstructure(
            self.tables.port,
            [self.tables.port.protocol, self.tables.port.port],
            self.tables.port.state == "open")
    elif field == "ttl":
        field = self._topstructure(
            self.tables.port,
            [self.tables.port.state_reason_ttl],
            self.tables.port.state_reason_ttl != None,  # noqa: E711 (BinaryExpression)
        )
    elif field == "ttlinit":
        field = self._topstructure(
            self.tables.port,
            [func.least(255, func.power(2, func.ceil(
                func.log(2, self.tables.port.state_reason_ttl))))],
            self.tables.port.state_reason_ttl != None,  # noqa: E711 (BinaryExpression)
        )
        outputproc = int
    elif field.startswith('port:'):
        info = field[5:]
        field = self._topstructure(
            self.tables.port,
            [self.tables.port.protocol, self.tables.port.port],
            (self.tables.port.state == info)
            if info in ['open', 'filtered', 'closed', 'open|filtered']
            else (self.tables.port.service_name == info),
        )
    elif field.startswith('countports:'):
        info = field[11:]
        return (
            {"count": result[0], "_id": result[1]}
            for result in self.db.execute(
                select([func.count().label("count"), column('cnt')])
                .select_from(
                    select([func.count().label('cnt')])
                    .select_from(self.tables.port)
                    .where(and_(
                        self.tables.port.state == info,
                        # self.tables.port.scan.in_(base),
                        exists(
                            select([1])
                            .select_from(base)
                            .where(self.tables.port.scan == base.c.id)
                        ),
                    ))
                    .group_by(self.tables.port.scan)
                    .alias('cnt')
                ).group_by('cnt').order_by(order).limit(topnbr)
            )
        )
    elif field.startswith('portlist:'):
        # Two options to filter here:
        #  -1- self.tables.port.scan.in_(base),
        #  -2- exists(select([1])
        #             .select_from(base)
        #             .where(self.tables.port.scan == base.c.id)),
        #
        # Based on a few tests, option -1- is much faster when (base) is
        # not (or barely) selective, while option -2- is slightly faster
        # when (base) is highly selective.
        #
        # TODO: check whether the same holds for:
        #   - countports:open
        #   - all the others
        info = field[9:]
        return (
            {
                "count": result[0],
                "_id": [
                    (proto, int(port)) for proto, port in (
                        elt.split(',')
                        for elt in result[1][3:-3].split(')","('))
                ],
            }
            for result in self.db.execute(
                select([func.count().label("count"), column('ports')])
                .select_from(
                    select([
                        func.array_agg(postgresql.aggregate_order_by(
                            tuple_(self.tables.port.protocol,
                                   self.tables.port.port).label('a'),
                            tuple_(self.tables.port.protocol,
                                   self.tables.port.port).label('a'),
                        )).label('ports'),
                    ])
                    .where(and_(
                        self.tables.port.state == info,
                        self.tables.port.scan.in_(base),
                        # exists(select([1])
                        #        .select_from(base)
                        #        .where(self.tables.port.scan
                        #               == base.c.id)),
                    ))
                    .group_by(self.tables.port.scan)
                    .alias('ports')
                ).group_by('ports').order_by(order).limit(topnbr)
            )
        )
    elif field == "service":
        field = self._topstructure(self.tables.port,
                                   [self.tables.port.service_name],
                                   self.tables.port.state == "open")
    elif field.startswith("service:"):
        info = field[8:]
        if '/' in info:
            info = info.split('/', 1)
            field = self._topstructure(
                self.tables.port,
                [self.tables.port.service_name],
                and_(self.tables.port.protocol == info[0],
                     self.tables.port.port == int(info[1])),
            )
        else:
            field = self._topstructure(self.tables.port,
                                       [self.tables.port.service_name],
                                       self.tables.port.port == int(info))
    elif field == "product":
        field = self._topstructure(
            self.tables.port,
            [self.tables.port.service_name,
             self.tables.port.service_product],
            self.tables.port.state == "open",
        )
    elif field.startswith("product:"):
        info = field[8:]
        if info.isdigit():
            info = int(info)
            flt = self.flt_and(flt, self.searchport(info))
            field = self._topstructure(
                self.tables.port,
                [self.tables.port.service_name,
                 self.tables.port.service_product],
                and_(self.tables.port.state == "open",
                     self.tables.port.port == info),
            )
        elif info.startswith('tcp/') or info.startswith('udp/'):
            info = (info[:3], int(info[4:]))
            flt = self.flt_and(flt,
                               self.searchport(info[1], protocol=info[0]))
            field = self._topstructure(
                self.tables.port,
                [self.tables.port.service_name,
                 self.tables.port.service_product],
                and_(self.tables.port.state == "open",
                     self.tables.port.port == info[1],
                     self.tables.port.protocol == info[0]),
            )
        else:
            flt = self.flt_and(flt, self.searchservice(info))
            field = self._topstructure(
                self.tables.port,
                [self.tables.port.service_name,
                 self.tables.port.service_product],
                and_(self.tables.port.state == "open",
                     self.tables.port.service_name == info),
            )
    elif field == "devicetype":
        field = self._topstructure(self.tables.port,
                                   [self.tables.port.service_devicetype],
                                   self.tables.port.state == "open")
    elif field.startswith("devicetype:"):
        info = field[11:]
        if info.isdigit():
            info = int(info)
            flt = self.flt_and(flt, self.searchport(info))
            field = self._topstructure(
                self.tables.port,
                [self.tables.port.service_devicetype],
                and_(self.tables.port.state == "open",
                     self.tables.port.port == info))
        elif info.startswith('tcp/') or info.startswith('udp/'):
            info = (info[:3], int(info[4:]))
            flt = self.flt_and(flt,
                               self.searchport(info[1], protocol=info[0]))
            field = self._topstructure(
                self.tables.port,
                [self.tables.port.service_devicetype],
                and_(self.tables.port.state == "open",
                     self.tables.port.port == info[1],
                     self.tables.port.protocol == info[0]))
        else:
            flt = self.flt_and(flt, self.searchservice(info))
            field = self._topstructure(
                self.tables.port,
                [self.tables.port.service_devicetype],
                and_(self.tables.port.state == "open",
                     self.tables.port.service_name == info))
    elif field == "version":
        field = self._topstructure(
            self.tables.port,
            [self.tables.port.service_name,
             self.tables.port.service_product,
             self.tables.port.service_version],
            self.tables.port.state == "open",
        )
    elif field.startswith("version:"):
        info = field[8:]
        if info.isdigit():
            info = int(info)
            flt = self.flt_and(flt, self.searchport(info))
            field = self._topstructure(
                self.tables.port,
                [self.tables.port.service_name,
                 self.tables.port.service_product,
                 self.tables.port.service_version],
                and_(self.tables.port.state == "open",
                     self.tables.port.port == info),
            )
        elif info.startswith('tcp/') or info.startswith('udp/'):
            info = (info[:3], int(info[4:]))
            flt = self.flt_and(flt,
                               self.searchport(info[1], protocol=info[0]))
            field = self._topstructure(
                self.tables.port,
                [self.tables.port.service_name,
                 self.tables.port.service_product,
                 self.tables.port.service_version],
                and_(self.tables.port.state == "open",
                     self.tables.port.port == info[1],
                     self.tables.port.protocol == info[0]),
            )
        elif ':' in info:
            info = info.split(':', 1)
            flt = self.flt_and(flt,
                               self.searchproduct(info[1], service=info[0]))
            field = self._topstructure(
                self.tables.port,
                [self.tables.port.service_name,
                 self.tables.port.service_product,
                 self.tables.port.service_version],
                and_(self.tables.port.state == "open",
                     self.tables.port.service_name == info[0],
                     self.tables.port.service_product == info[1]),
            )
        else:
            flt = self.flt_and(flt, self.searchservice(info))
            field = self._topstructure(
                self.tables.port,
                [self.tables.port.service_name,
                 self.tables.port.service_product,
                 self.tables.port.service_version],
                and_(self.tables.port.state == "open",
                     self.tables.port.service_name == info),
            )
    elif field == "asnum":
        field = self._topstructure(self.tables.scan,
                                   [self.tables.scan.info["as_num"]])
    elif field == "as":
        field = self._topstructure(self.tables.scan,
                                   [self.tables.scan.info["as_num"],
                                    self.tables.scan.info["as_name"]])
    elif field == "country":
        field = self._topstructure(self.tables.scan,
                                   [self.tables.scan.info["country_code"],
                                    self.tables.scan.info["country_name"]])
    elif field == "city":
        field = self._topstructure(self.tables.scan,
                                   [self.tables.scan.info["country_code"],
                                    self.tables.scan.info["city"]])
    elif field == "net" or field.startswith("net:"):
        info = field[4:]
        info = int(info) if info else 24
        field = self._topstructure(
            self.tables.scan,
            [func.set_masklen(text("scan.addr::cidr"), info)],
        )
    elif field == "script" or field.startswith("script:"):
        info = field[7:]
        if info:
            field = self._topstructure(self.tables.script,
                                       [self.tables.script.output],
                                       self.tables.script.name == info)
        else:
            field = self._topstructure(self.tables.script,
                                       [self.tables.script.name])
    elif field in ["category", "categories"]:
        field = self._topstructure(self.tables.category,
                                   [self.tables.category.name])
    elif field.startswith('cert.'):
        subfield = field[5:]
        field = self._topstructure(
            self.tables.script,
            [self.tables.script.data['ssl-cert'][subfield]],
            and_(self.tables.script.name == 'ssl-cert',
                 self.tables.script.data['ssl-cert'].has_key(subfield)))  # noqa: W601 (BinaryExpression)
    elif field == "source":
        field = self._topstructure(self.tables.scan,
                                   [self.tables.scan.source])
    elif field == "domains":
        field = self._topstructure(
            self.tables.hostname,
            [func.unnest(self.tables.hostname.domains)])
    elif field.startswith("domains:"):
        level = int(field[8:]) - 1
        base1 = (select([
            func.unnest(self.tables.hostname.domains).label("domains")
        ]).where(
            exists(
                select([1]).select_from(base).where(
                    self.tables.hostname.scan == base.c.id))
        ).cte("base1"))
        return ({"count": result[1], "_id": result[0]}
                for result in self.db.execute(
                    select([base1.c.domains, func.count().label("count")])
                    .where(base1.c.domains.op('~')(
                        '^([^\\.]+\\.){%d}[^\\.]+$' % level))
                    .group_by(base1.c.domains)
                    .order_by(order).limit(topnbr)))
    elif field == "hop":
        field = self._topstructure(self.tables.hop,
                                   [self.tables.hop.ipaddr])
    elif field.startswith('hop') and field[3] in ':>':
        ttl = int(field[4:])
        field = self._topstructure(
            self.tables.hop,
            [self.tables.hop.ipaddr],
            (self.tables.hop.ttl > ttl) if field[3] == '>'
            else (self.tables.hop.ttl == ttl),
        )
    elif field == 'file' or (field.startswith('file') and field[4] in '.:'):
        if field.startswith('file:'):
            scripts = field[5:]
            if '.' in scripts:
                scripts, field = scripts.split('.', 1)
            else:
                field = 'filename'
            scripts = scripts.split(',')
            flt = (self.tables.script.name == scripts[0]
                   if len(scripts) == 1
                   else self.tables.script.name.in_(scripts))
        else:
            field = field[5:] or 'filename'
            flt = True
        field = self._topstructure(
            self.tables.script,
            [
                func.jsonb_array_elements(
                    func.jsonb_array_elements(
                        self.tables.script.data['ls']['volumes']
                    ).op('->')('files')
                ).op('->>')(field).label(field)
            ],
            and_(
                flt,
                self.tables.script.data.op('@>')(
                    '{"ls": {"volumes": [{"files": []}]}}'),
            ),
        )
    elif field.startswith('modbus.'):
        subfield = field[7:]
        field = self._topstructure(
            self.tables.script,
            [self.tables.script.data['modbus-discover'][subfield]],
            and_(self.tables.script.name == 'modbus-discover',
                 self.tables.script.data['modbus-discover'].has_key(subfield)),  # noqa: W601 (BinaryExpression)
        )
    elif field.startswith('s7.'):
        subfield = field[3:]
        field = self._topstructure(
            self.tables.script,
            [self.tables.script.data['s7-info'][subfield]],
            and_(self.tables.script.name == 's7-info',
                 self.tables.script.data['s7-info'].has_key(subfield)),  # noqa: W601 (BinaryExpression)
        )
    elif field == 'httphdr':
        flt = self.flt_and(flt, self.searchscript(name="http-headers"))
        field = self._topstructure(
            self.tables.script,
            [column("hdr").op('->>')('name').label("name"),
             column("hdr").op('->>')('value').label("value")],
            self.tables.script.name == 'http-headers',
            [column("name"), column("value")],
            func.jsonb_array_elements(
                self.tables.script.data['http-headers']).alias('hdr'),
        )
    elif field.startswith('httphdr.'):
        flt = self.flt_and(flt, self.searchscript(name="http-headers"))
        field = self._topstructure(
            self.tables.script,
            [column("hdr").op('->>')(field[8:]).label("topvalue")],
            self.tables.script.name == 'http-headers',
            [column("topvalue")],
            func.jsonb_array_elements(
                self.tables.script.data['http-headers']).alias('hdr'),
        )
    elif field.startswith('httphdr:'):
        flt = self.flt_and(flt, self.searchhttphdr(name=field[8:].lower()))
        field = self._topstructure(
            self.tables.script,
            [column("hdr").op('->>')("value").label("value")],
            and_(self.tables.script.name == 'http-headers',
                 column("hdr").op('->>')("name") == field[8:].lower()),
            [column("value")],
            func.jsonb_array_elements(
                self.tables.script.data['http-headers']).alias('hdr'),
        )
    else:
        raise NotImplementedError()
    s_from = {
        self.tables.script: join(self.tables.script, self.tables.port),
        self.tables.port: self.tables.port,
        self.tables.category: join(self.tables.association_scan_category,
                                   self.tables.category),
        self.tables.hostname: self.tables.hostname,
        self.tables.hop: join(self.tables.trace, self.tables.hop),
    }
    where_clause = {
        self.tables.script: self.tables.port.scan == base.c.id,
        self.tables.port: self.tables.port.scan == base.c.id,
        self.tables.category:
            self.tables.association_scan_category.scan == base.c.id,
        self.tables.hostname: self.tables.hostname.scan == base.c.id,
        self.tables.hop: self.tables.trace.scan == base.c.id,
    }
    if field.base == self.tables.scan:
        req = flt.query(
            select([func.count().label("count")] + field.fields)
            .select_from(self.tables.scan)
            .group_by(*field.fields))
    else:
        req = (select([func.count().label("count")] + field.fields)
               .select_from(s_from[field.base]))
        if field.extraselectfrom is not None:
            req = req.select_from(field.extraselectfrom)
        req = (req.group_by(*(field.fields if field.group_by is None
                              else field.group_by))
               .where(exists(select([1]).select_from(base)
                             .where(where_clause[field.base]))))
    if field.where is not None:
        req = req.where(field.where)
    if outputproc is None:
        return (
            {"count": result[0],
             "_id": result[1:] if len(result) > 2 else result[1]}
            for result in self.db.execute(
                req.order_by(order).limit(topnbr)))
    else:
        return (
            {"count": result[0],
             "_id": outputproc(result[1:] if len(result) > 2
                               else result[1])}
            for result in self.db.execute(
                req.order_by(order).limit(topnbr)))
def _store_host(self, host):
    addr = self.convert_ip(host['addr'])
    info = host.get('infos')
    if 'coordinates' in (info or {}).get('loc', {}):
        info['coordinates'] = info.pop('loc')['coordinates'][::-1]
    source = host.get('source', [])
    host_tstart = utils.all2datetime(host['starttime'])
    host_tstop = utils.all2datetime(host['endtime'])
    insrt = postgresql.insert(self.tables.scan)
    scanid, scan_tstop = self.db.execute(
        insrt.values(
            addr=addr,
            source=source,
            info=info,
            time_start=host_tstart,
            time_stop=host_tstop,
            **dict(
                (key, host.get(key))
                for key in ['state', 'state_reason', 'state_reason_ttl']
                if key in host
            )
        ).on_conflict_do_update(
            index_elements=['addr'],
            set_={
                'source': self.tables.scan.source + insrt.excluded.source,
                'time_start': func.least(
                    self.tables.scan.time_start,
                    insrt.excluded.time_start,
                ),
                'time_stop': func.greatest(
                    self.tables.scan.time_stop,
                    insrt.excluded.time_stop,
                ),
            },
        ).returning(self.tables.scan.id,
                    self.tables.scan.time_stop)).fetchone()
    newest = scan_tstop <= host_tstop
    for category in host.get("categories", []):
        insrt = postgresql.insert(self.tables.category)
        catid = self.db.execute(
            insrt.values(name=category).on_conflict_do_update(
                index_elements=['name'],
                set_={'name': insrt.excluded.name},
            ).returning(self.tables.category.id)).fetchone()[0]
        self.db.execute(
            postgresql.insert(self.tables.association_scan_category).values(
                scan=scanid, category=catid).on_conflict_do_nothing())
    for port in host.get('ports', []):
        scripts = port.pop('scripts', [])
        # FIXME: handle screenshots
        for fld in ['screendata', 'screenshot', 'screenwords',
                    'service_method']:
            try:
                del port[fld]
            except KeyError:
                pass
        if 'service_servicefp' in port:
            port['service_fp'] = port.pop('service_servicefp')
        if 'state_state' in port:
            port['state'] = port.pop('state_state')
        if 'state_reason_ip' in port:
            port['state_reason_ip'] = self.convert_ip(
                port['state_reason_ip'])
        insrt = postgresql.insert(self.tables.port)
        portid = self.db.execute(
            insrt.values(scan=scanid, **port).on_conflict_do_update(
                index_elements=['scan', 'port', 'protocol'],
                set_=dict(scan=scanid, **(port if newest else {})),
            ).returning(self.tables.port.id)).fetchone()[0]
        for script in scripts:
            name, output = script.pop('id'), script.pop('output')
            if newest:
                insrt = postgresql.insert(self.tables.script)
                self.bulk.append(
                    insrt.values(
                        port=portid,
                        name=name,
                        output=output,
                        data=script,
                    ).on_conflict_do_update(
                        index_elements=['port', 'name'],
                        set_={
                            "output": insrt.excluded.output,
                            "data": insrt.excluded.data,
                        },
                    ))
            else:
                insrt = postgresql.insert(self.tables.script)
                self.bulk.append(
                    insrt.values(
                        port=portid,
                        name=name,
                        output=output,
                        data=script,
                    ).on_conflict_do_nothing())
    for trace in host.get('traces', []):
        traceid = self.db.execute(
            postgresql.insert(self.tables.trace).values(
                scan=scanid,
                port=trace.get('port'),
                protocol=trace['protocol'],
            ).on_conflict_do_nothing().returning(
                self.tables.trace.id)).fetchone()[0]
        for hop in trace.get('hops'):
            hop['ipaddr'] = self.convert_ip(hop['ipaddr'])
            self.bulk.append(
                postgresql.insert(self.tables.hop).values(
                    trace=traceid,
                    ipaddr=self.convert_ip(hop['ipaddr']),
                    ttl=hop["ttl"],
                    rtt=None if hop["rtt"] == '--' else hop["rtt"],
                    host=hop.get("host"),
                    domains=hop.get("domains"),
                ))
    for hostname in host.get('hostnames', []):
        self.bulk.append(
            postgresql.insert(self.tables.hostname).values(
                scan=scanid,
                domains=hostname.get('domains'),
                name=hostname.get('name'),
                type=hostname.get('type'),
            ).on_conflict_do_nothing())
    utils.LOGGER.debug("VIEW STORED: %r", scanid)
    return scanid
def execute(self, message, user, params):
    alliance = Alliance()
    race = None
    size_mod = None
    size = None
    value_mod = None
    value = None
    bash = False
    attacker = user.planet
    cluster = None

    params = params.group(1).split()

    for p in params:
        m = self.bashre.match(p)
        if m and not bash:
            bash = True
            continue
        m = self.clusterre.match(p)
        if m and not cluster:
            cluster = int(m.group(1))
        m = self.racere.match(p)
        if m and not race:
            race = m.group(1)
            continue
        m = self.rangere.match(p)
        if m and not size and int(m.group(2)) < 32768:
            size_mod = m.group(1) or '>'
            size = m.group(2)
            continue
        m = self.rangere.match(p)
        if m and not value:
            value_mod = m.group(1) or '<'
            value = m.group(2)
            continue
        m = self.alliancere.match(p)
        if m and not alliance.name and not self.clusterre.match(p):
            alliance = Alliance(name="Unknown") \
                if m.group(1).lower() == "unknown" \
                else Alliance.load(m.group(1))
            if alliance is None:
                message.reply("No alliance matching '%s' found" % (m.group(1),))
                return
            continue

    maxcap = PA.getfloat("roids", "maxcap")
    mincap = PA.getfloat("roids", "mincap")
    modifier = (cast(Planet.value, Float).op("/")(float(attacker.value))).op("^")(0.5)
    caprate = func.greatest(mincap, func.least(modifier.op("*")(maxcap), maxcap))
    maxcap = cast(func.floor(cast(Planet.size, Float).op("*")(caprate)), Integer)

    bravery = func.greatest(0.2,
                            func.least(2.2, cast(Planet.score, Float).op("/")(float(attacker.score))) - 0.2) \
        * func.greatest(0.2,
                        func.least(1.8, cast(Planet.value, Float).op("/")(float(attacker.value))) - 0.1) \
        / ((6 + max(4.0, float(attacker.score) / float(attacker.value))) / 10.0)
    xp_gain = cast(func.floor(maxcap.op("*")(bravery.op("*")(10.0))), Integer)

    Q = session.query(Planet, Intel, xp_gain.label("xp_gain"))
    if alliance.id:
        Q = Q.join(Planet.intel)
        Q = Q.filter(Intel.alliance == alliance)
    else:
        Q = Q.outerjoin(Planet.intel)
        if alliance.name:
            Q = Q.filter(Intel.alliance == None)  # noqa
    Q = Q.filter(Planet.active == True)  # noqa
    if race:
        Q = Q.filter(Planet.race.ilike(race))
    if size:
        Q = Q.filter(Planet.size.op(size_mod)(size))
    if value:
        Q = Q.filter(Planet.value.op(value_mod)(value))
    if bash:
        Q = Q.filter(or_(
            Planet.value.op(">")(attacker.value * PA.getfloat("bash", "value")),
            Planet.score.op(">")(attacker.score * PA.getfloat("bash", "score"))))
    if cluster:
        Q = Q.filter(Planet.x == cluster)
    Q = Q.order_by(desc("xp_gain"))
    Q = Q.order_by(desc(Planet.idle))
    Q = Q.order_by(desc(Planet.value))
    result = Q[:6]

    if len(result) < 1:
        reply = "No"
        if race:
            reply += " %s" % (race,)
        reply += " planets"
        if alliance.name:
            reply += " in intel matching Alliance: %s" % (alliance.name,)
        else:
            reply += " matching"
        if size:
            reply += " Size %s %s" % (size_mod, size)
        if value:
            reply += " Value %s %s" % (value_mod, value)
        message.reply(reply)
        return

    replies = []
    for planet, intel, xp_gain in result[:5]:
        reply = "%s:%s:%s (%s)" % (planet.x, planet.y, planet.z, planet.race)
        reply += " Value: %s Size: %s Scoregain: %d" % (
            planet.value, planet.size,
            xp_gain * PA.getint("numbers", "xp_value"))
        if intel:
            if intel.nick:
                reply += " Nick: %s" % (intel.nick,)
            if not alliance.name and intel.alliance:
                reply += " Alliance: %s" % (intel.alliance.name,)
        replies.append(reply)
    if len(result) > 5:
        replies[-1] += " (Too many results to list, please refine your search)"
    message.reply("\n".join(replies))
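# The caprate expression above is a clamp: scale maxcap by
# sqrt(target_value / attacker_value), then bound the result to the
# [mincap, maxcap] range. The same logic in plain Python:
def cap_rate(target_value, attacker_value, mincap, maxcap):
    modifier = (target_value / attacker_value) ** 0.5
    return max(mincap, min(modifier * maxcap, maxcap))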
def insert_or_update_bulk(self, specs, getinfos=None,
                          separated_timestamps=True):
    """Like `.insert_or_update()`, but the `specs` parameter has to be an
    iterable of `(timestamp, spec)` (if `separated_timestamps` is True) or
    `spec` (if it is False) values. This will perform PostgreSQL COPY FROM
    inserts with the major drawback that the `getinfos` parameter will be
    called (if it is not `None`) for each spec, even when the spec already
    exists in the database and the call was hence unnecessary.

    It's up to you to decide whether having bulk insert is worth it or if
    you want to go with the regular `.insert_or_update()` method.

    """
    more_to_read = True
    tmp = self.create_tmp_table(self.tables.passive)
    if config.DEBUG_DB:
        total_upserted = 0
        total_start_time = time.time()
    while more_to_read:
        if config.DEBUG_DB:
            start_time = time.time()
        with PassiveCSVFile(specs, self.convert_ip, tmp,
                            getinfos=getinfos,
                            separated_timestamps=separated_timestamps,
                            limit=config.POSTGRES_BATCH_SIZE) as fdesc:
            self.copy_from(fdesc, tmp.name)
            more_to_read = fdesc.more_to_read
            if config.DEBUG_DB:
                count_upserted = fdesc.count
        insrt = postgresql.insert(self.tables.passive)
        self.db.execute(
            insrt.from_select(
                [column(col) for col in [
                    'addr',
                    # sum / min / max
                    'count', 'firstseen', 'lastseen',
                    # grouped
                    'sensor', 'port', 'recontype', 'source', 'targetval',
                    'value', 'fullvalue', 'info', 'moreinfo',
                ]],
                select([tmp.columns['addr'],
                        func.sum(tmp.columns['count']),
                        func.min(tmp.columns['firstseen']),
                        func.max(tmp.columns['lastseen'])] + [
                    tmp.columns[col] for col in [
                        'sensor', 'port', 'recontype', 'source',
                        'targetval', 'value', 'fullvalue', 'info',
                        'moreinfo']])
                .group_by(*(tmp.columns[col] for col in [
                    'addr', 'sensor', 'port', 'recontype', 'source',
                    'targetval', 'value', 'fullvalue', 'info', 'moreinfo',
                ]))
            ).on_conflict_do_update(
                index_elements=['addr', 'sensor', 'recontype', 'port',
                                'source', 'value', 'targetval', 'info'],
                set_={
                    'firstseen': func.least(
                        self.tables.passive.firstseen,
                        insrt.excluded.firstseen,
                    ),
                    'lastseen': func.greatest(
                        self.tables.passive.lastseen,
                        insrt.excluded.lastseen,
                    ),
                    'count':
                        self.tables.passive.count + insrt.excluded.count,
                },
            )
        )
        self.db.execute(delete(tmp))
        if config.DEBUG_DB:
            stop_time = time.time()
            time_spent = stop_time - start_time
            total_upserted += count_upserted
            total_time_spent = stop_time - total_start_time
            utils.LOGGER.debug(
                "DB:PERFORMANCE STATS %s upserts, %f s, %s/s\n"
                "\ttotal: %s upserts, %f s, %s/s",
                utils.num2readable(count_upserted), time_spent,
                utils.num2readable(count_upserted / time_spent),
                utils.num2readable(total_upserted), total_time_spent,
                utils.num2readable(total_upserted / total_time_spent),
            )
def fetch_annual_billing(service_id, year):
    start_date, end_date = get_financial_year_range(year)
    fragments_free_limit = literal(
        fetch_sms_free_allowance_for_financial_year(service_id, year))

    query = dao_get_priced_billing_data().filter(
        FactBilling.service_id == service_id,
        FactBilling.aet_date >= start_date,
        FactBilling.aet_date <= end_date,
    )

    # break down usage by month with empty gaps by generating a series
    months_series = func.generate_series(start_date, end_date,
                                         '1 months').alias('month')
    months_series_c = column('month').cast(Date()).label('month')
    query_cte = query.cte()

    # calculate in credits in order to work backwards to get available units
    # after removing cost usage
    credits_free = fragments_free_limit * FRAGMENT_UNIT_RATE

    # coalesce and sum these totals by month
    cost = total(query_cte.c.cost)
    notifications = total(query_cte.c.notifications_sent)
    notifications_email_ = func.sum(query_cte.c.notifications_sent).filter(
        query_cte.c.notification_type == EMAIL_TYPE)
    notifications_sms_ = func.sum(query_cte.c.notifications_sent).filter(
        query_cte.c.notification_type == SMS_TYPE)
    notifications_email = func.coalesce(notifications_email_, 0)
    notifications_sms = func.coalesce(notifications_sms_, 0)
    fragments_domestic = total(query_cte.c.domestic_units)
    fragments_international = total(query_cte.c.international_units)

    # cumulative figures for entire year
    cost_cumulative = func.sum(cost).over(order_by=months_series_c)
    cost_starting = cost_cumulative - cost
    credits_remaining = func.greatest(credits_free - cost_cumulative, 0)
    credits_available = func.greatest(credits_free - cost_starting, 0)
    cost_chargeable = func.greatest(cost - credits_available, 0)
    cost_chargeable_cumulative = func.greatest(cost_cumulative - credits_free, 0)
    credits_used = func.least(cost, credits_available)

    units = cost / FRAGMENT_UNIT_RATE
    units_cumulative = cost_cumulative / FRAGMENT_UNIT_RATE
    units_chargeable = cost_chargeable / FRAGMENT_UNIT_RATE
    units_free_used = credits_used / FRAGMENT_UNIT_RATE
    units_free_available = credits_available / FRAGMENT_UNIT_RATE
    units_free_remaining = credits_remaining / FRAGMENT_UNIT_RATE
    units_chargeable_cumulative = cost_chargeable_cumulative / FRAGMENT_UNIT_RATE

    gapfilled_query = db.session.query(
        months_series_c,
        fragments_free_limit.label('fragments_free_limit'),
        fragments_domestic.label('fragments_domestic'),
        fragments_international.label('fragments_international'),
        notifications.label('notifications'),
        notifications_sms.label('notifications_sms'),
        notifications_email.label('notifications_email'),
        cost.label('cost'),
        cost_chargeable.label('cost_chargeable'),
        cost_cumulative.label('cost_cumulative'),
        cost_chargeable_cumulative.label('cost_chargeable_cumulative'),
        units.label('units'),
        units_cumulative.label('units_cumulative'),
        units_chargeable.label('units_chargeable'),
        units_chargeable_cumulative.label('units_chargeable_cumulative'),
        units_free_available.label('units_free_available'),
        units_free_remaining.label('units_free_remaining'),
        units_free_used.label('units_free_used'),
        literal(DOMESTIC_UNIT_RATE * 100).label('unit_rate_domestic'),
        literal(INTERNATIONAL_UNIT_RATE * 100).label('unit_rate_international'),
    ).select_from(
        months_series,
    ).outerjoin(
        query_cte,
        query_cte.c.aet_month == months_series_c,
    ).group_by(
        months_series_c,
    ).order_by(
        months_series_c,
    )

    return gapfilled_query
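# Worked example (plain Python) of the rolling free-credit arithmetic above,
# for a free allowance worth 5.0 in credits and monthly costs [2, 2, 2]:
#   month 1: cumulative=2, available=5.0 -> chargeable=0.0, used=2.0
#   month 2: cumulative=4, available=3.0 -> chargeable=0.0, used=2.0
#   month 3: cumulative=6, available=1.0 -> chargeable=1.0, used=1.0
def monthly_credit_usage(costs, credits_free):
    cumulative = 0.0
    for cost in costs:
        starting = cumulative
        cumulative += cost
        available = max(credits_free - starting, 0)     # func.greatest
        chargeable = max(cost - available, 0)           # func.greatest
        used = min(cost, available)                     # func.least
        yield chargeable, used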
def centerline_query(session, detection):
    """Finds the centerline orientation that most closely agrees with
    detection-intersected roadbeds."""
    # pylint: disable-msg=E1101
    car_polygon = Detection.geom
    car_polygon102718 = func.ST_Transform(car_polygon, 102718)
    car_filter = func.ST_Intersects(
        Roadbed.geom,
        car_polygon102718
    )
    query = session.query(
        Roadbed.gid) \
        .filter(Detection.id == detection.id) \
        .filter(car_filter)
    road_gids = query.all()
    if len(road_gids) == 0:
        return

    lat, lon, alt = session.query(
        func.ST_Y(Detection.lla),
        func.ST_X(Detection.lla),
        func.ST_Z(Detection.lla)) \
        .filter(Detection.id == detection.id) \
        .one()
    lla = numpy.array([[lat, lon, alt]])
    enu = pygeo.LLAToENU(lla).reshape((3, 3))

    roadbeds4326 = func.ST_Transform(Roadbed.geom, 4326)

    centerlines4326 = PlanetOsmLine.way
    centerline_filter = func.ST_Intersects(roadbeds4326, centerlines4326)
    centerline_frac = func.ST_Line_Locate_Point(
        centerlines4326, Detection.lla)
    centerline_start_frac = func.least(1, centerline_frac + 0.01)
    centerline_end_frac = func.greatest(0, centerline_frac - 0.01)
    centerline_start = func.ST_Line_Interpolate_Point(centerlines4326,
                                                      centerline_start_frac)
    centerline_end = func.ST_Line_Interpolate_Point(centerlines4326,
                                                    centerline_end_frac)

    segments = session.query(
        func.ST_Y(centerline_start).label('lats'),
        func.ST_X(centerline_start).label('lons'),
        func.ST_Y(centerline_end).label('late'),
        func.ST_X(centerline_end).label('lone'),
        PlanetOsmLine.oneway) \
        .filter(Detection.id == detection.id) \
        .filter(centerline_filter) \
        .filter(Roadbed.gid.in_(road_gids)) \
        .filter(PlanetOsmLine.osm_id >= 0) \
        .filter(PlanetOsmLine.railway.__eq__(None))
    # pylint: enable-msg=E1101

    for segment in segments:
        segment_start = pygeo.LLAToECEF(numpy.array(
            [[segment.lats, segment.lons, alt]],
            dtype=numpy.float64
        ))
        segment_end = pygeo.LLAToECEF(numpy.array(
            [[segment.late, segment.lone, alt]],
            dtype=numpy.float64
        ))
        segment_dir = (segment_end - segment_start)
        segment_dir /= numpy.linalg.norm(segment_dir)
        segment_rot = enu.T.dot(segment_dir.T)
        segment_angle = math.atan2(segment_rot[1], segment_rot[0])
        yield segment_angle, segment.oneway
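# least(1, frac + 0.01) and greatest(0, frac - 0.01) above clamp the two
# interpolation fractions to the valid [0, 1] range, so that
# ST_Line_Interpolate_Point is never asked for a point beyond either end of
# the centerline:
def clamped_fracs(frac, delta=0.01):
    return min(1.0, frac + delta), max(0.0, frac - delta)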
def execute(self, message, user, params):
    alliance = Alliance()
    race = None
    size_mod = None
    size = None
    value_mod = None
    value = None
    bash = False
    attacker = user.planet
    cluster = None

    params = params.group(1).split()

    for p in params:
        m = self.bashre.match(p)
        if m and not bash:
            bash = True
            continue
        m = self.clusterre.match(p)
        if m and not cluster:
            cluster = int(m.group(1))
        m = self.racere.match(p)
        if m and not race:
            race = m.group(1)
            continue
        m = self.rangere.match(p)
        if m and not size and int(m.group(2)) < 32768:
            size_mod = m.group(1) or '>'
            size = m.group(2)
            continue
        m = self.rangere.match(p)
        if m and not value:
            value_mod = m.group(1) or '<'
            value = m.group(2)
            continue
        m = self.alliancere.match(p)
        if m and not alliance.name and not self.clusterre.match(p):
            alliance = Alliance(name="Unknown") \
                if m.group(1).lower() == "unknown" \
                else Alliance.load(m.group(1))
            if alliance is None:
                message.reply("No alliance matching '%s' found" % (m.group(1),))
                return
            continue

    maxcap = PA.getfloat("roids", "maxcap")
    mincap = PA.getfloat("roids", "mincap")
    modifier = (cast(Planet.value, Float).op("/")(float(attacker.value))).op("^")(0.5)
    caprate = func.greatest(mincap, func.least(modifier.op("*")(maxcap), maxcap))
    maxcap = cast(func.floor(cast(Planet.size, Float).op("*")(caprate)), Integer)

    bravery = (func.greatest(0.0,
                             (func.least(2.0, cast(Planet.value, Float).op("/")(float(attacker.value))) - 0.1) *
                             (func.least(2.0, cast(Planet.score, Float).op("/")(float(attacker.score))) - 0.2))
               ).op("*")(10.0)
    xp_gain = cast(func.floor(maxcap.op("*")(bravery)), Integer)

    Q = session.query(Planet, Intel, xp_gain.label("xp_gain"))
    if alliance.id:
        Q = Q.join(Planet.intel)
        Q = Q.filter(Intel.alliance == alliance)
    else:
        Q = Q.outerjoin(Planet.intel)
        if alliance.name:
            Q = Q.filter(Intel.alliance == None)  # noqa
    Q = Q.filter(Planet.active == True)  # noqa
    if race:
        Q = Q.filter(Planet.race.ilike(race))
    if size:
        Q = Q.filter(Planet.size.op(size_mod)(size))
    if value:
        Q = Q.filter(Planet.value.op(value_mod)(value))
    if bash:
        Q = Q.filter(or_(
            Planet.value.op(">")(attacker.value * PA.getfloat("bash", "value")),
            Planet.score.op(">")(attacker.score * PA.getfloat("bash", "score"))))
    if cluster:
        Q = Q.filter(Planet.x == cluster)
    Q = Q.order_by(desc("xp_gain"))
    Q = Q.order_by(desc(Planet.idle))
    Q = Q.order_by(desc(Planet.value))
    result = Q[:6]

    if len(result) < 1:
        reply = "No"
        if race:
            reply += " %s" % (race,)
        reply += " planets"
        if alliance.name:
            reply += " in intel matching Alliance: %s" % (alliance.name,)
        else:
            reply += " matching"
        if size:
            reply += " Size %s %s" % (size_mod, size)
        if value:
            reply += " Value %s %s" % (value_mod, value)
        message.reply(reply)
        return

    replies = []
    for planet, intel, xp_gain in result[:5]:
        reply = "%s:%s:%s (%s)" % (planet.x, planet.y, planet.z, planet.race)
        reply += " Value: %s Size: %s Scoregain: %d" % (
            planet.value, planet.size,
            xp_gain * PA.getint("numbers", "xp_value"))
        if intel:
            if intel.nick:
                reply += " Nick: %s" % (intel.nick,)
            if not alliance.name and intel.alliance:
                reply += " Alliance: %s" % (intel.alliance.name,)
        replies.append(reply)
    if len(result) > 5:
        replies[-1] += " (Too many results to list, please refine your search)"
    message.reply("\n".join(replies))
def execute(self, message, user, params):
    alliance = Alliance()
    race = None
    size_mod = None
    size = None
    value_mod = None
    value = None
    bash = False
    attacker = user.planet
    cluster = None
    limit = 5

    params = params.group(1).split()

    for p in params:
        m = self.bashre.match(p)
        if m and not bash:
            bash = True
            continue
        m = self.clusterre.match(p)
        if m and not cluster:
            cluster = int(m.group(1))
        m = self.racere.match(p)
        if m and not race:
            race = m.group(1)
            continue
        m = self.rangere.match(p)
        if m and not size and int(m.group(2)) < 32768:
            size_mod = m.group(1) or '>'
            size = m.group(2)
            continue
        m = self.rangere.match(p)
        if m and not value:
            value_mod = m.group(1) or '<'
            value = m.group(2)
            continue
        m = self.alliancere.match(p)
        if m and not alliance.name and not self.clusterre.match(p):
            alliance = Alliance(name="Unknown") \
                if m.group(1).lower() == "unknown" \
                else Alliance.load(m.group(1))
            if alliance is None:
                message.reply("No alliance matching '%s' found" % (m.group(1),))
                return
            continue
        if p[:4] == "lots" and user.is_admin():
            limit = int(p[4:])

    maxcap = PA.getfloat("roids", "maxcap")
    mincap = PA.getfloat("roids", "mincap")
    modifier = (cast(Planet.value, Float).op("/")(float(attacker.value))).op("^")(0.5)
    caprate = func.greatest(mincap, func.least(modifier.op("*")(maxcap), maxcap))
    maxcap = cast(func.floor(cast(Planet.size, Float).op("*")(caprate)), Integer)

    Q = session.query(Planet, Intel, maxcap.label("maxcap"))
    if alliance.id:
        Q = Q.join(Planet.intel)
        Q = Q.filter(Intel.alliance == alliance)
    else:
        Q = Q.outerjoin(Planet.intel)
        if alliance.name:
            Q = Q.filter(Intel.alliance == None)  # noqa
    Q = Q.filter(Planet.active == True)  # noqa
    if race:
        Q = Q.filter(Planet.race.ilike(race))
    if size:
        Q = Q.filter(Planet.size.op(size_mod)(size))
    if value:
        Q = Q.filter(Planet.value.op(value_mod)(value))
    if bash:
        Q = Q.filter(or_(
            Planet.value.op(">")(attacker.value * PA.getfloat("bash", "value")),
            Planet.score.op(">")(attacker.score * PA.getfloat("bash", "score"))))
    if cluster:
        Q = Q.filter(Planet.x == cluster)
    Q = Q.order_by(desc("maxcap"))
    Q = Q.order_by(desc(Planet.size))
    Q = Q.order_by(desc(Planet.value))
    result = Q[:(limit + 1)]

    if len(result) < 1:
        reply = "No"
        if race:
            reply += " %s" % (race,)
        reply += " planets"
        if alliance.name:
            reply += " in intel matching Alliance: %s" % (alliance.name,)
        else:
            reply += " matching"
        if size:
            reply += " Size %s %s" % (size_mod, size)
        if value:
            reply += " Value %s %s" % (value_mod, value)
        message.reply(reply)
        return

    replies = []
    for planet, intel, maxcap in result[:limit]:
        reply = "%s:%s:%s (%s)" % (planet.x, planet.y, planet.z, planet.race)
        reply += " Value: %s Size: %s MaxCap: %s" % (planet.value, planet.size, maxcap)
        if intel:
            if intel.nick:
                reply += " Nick: %s" % (intel.nick,)
            if not alliance.name and intel.alliance:
                reply += " Alliance: %s" % (intel.alliance.name,)
        replies.append(reply)
        if len(replies) == 5:
            message.reply("\n".join(replies))
            replies = []
            sleep(3)
    if len(result) > limit:
        replies.append("(Too many results to list, please refine your search)")
    message.reply("\n".join(replies))
def store_host(self, host, merge=False):
    addr = self.convert_ip(host['addr'])
    info = host.get('infos')
    if 'coordinates' in (info or {}).get('loc', {}):
        info['coordinates'] = info.pop('loc')['coordinates'][::-1]
    source = host.get('source', '')
    if merge:
        insrt = postgresql.insert(Scan)
        scanid, scan_tstop, merge = self.db.execute(
            insrt.values(
                addr=addr,
                source=source,
                info=info,
                time_start=host['starttime'],
                time_stop=host['endtime'],
                archive=0,
                merge=False,
                **dict(
                    (key, host.get(key))
                    for key in ['state', 'state_reason', 'state_reason_ttl']
                    if key in host
                )
            )
            .on_conflict_do_update(
                index_elements=['addr', 'source', 'archive'],
                set_={
                    'time_start': func.least(
                        Scan.time_start,
                        insrt.excluded.time_start,
                    ),
                    'time_stop': func.greatest(
                        Scan.time_stop,
                        insrt.excluded.time_stop,
                    ),
                    'merge': True,
                },
            )
            .returning(Scan.id, Scan.time_stop, Scan.merge)).fetchone()
        if merge:
            # Test should be ==, using <= in case of rounding
            # issues.
            newest = scan_tstop <= host['endtime']
        else:
            newest = None
    else:
        curarchive = self.db.execute(
            select([func.max(Scan.archive)])
            .where(and_(Scan.addr == addr,
                        Scan.source == source))).fetchone()[0]
        if curarchive is not None:
            self.db.execute(update(Scan).where(and_(
                Scan.addr == addr,
                Scan.source == source,
                Scan.archive == 0,
            )).values(archive=curarchive + 1))
        scanid = self.db.execute(
            insert(Scan).values(
                addr=addr,
                source=source,
                info=info,
                time_start=host['starttime'],
                time_stop=host['endtime'],
                state=host['state'],
                state_reason=host['state_reason'],
                state_reason_ttl=host.get('state_reason_ttl'),
                archive=0,
                merge=False,
            ).returning(Scan.id)
        ).fetchone()[0]
    insrt = postgresql.insert(Association_Scan_ScanFile)
    self.db.execute(
        insrt.values(scan=scanid,
                     scan_file=utils.decode_hex(host['scanid']))
        .on_conflict_do_nothing())
    for category in host.get("categories", []):
        insrt = postgresql.insert(Category)
        catid = self.db.execute(
            insrt.values(name=category)
            .on_conflict_do_update(
                index_elements=['name'],
                set_={'name': insrt.excluded.name},
            )
            .returning(Category.id)
        ).fetchone()[0]
        self.db.execute(postgresql.insert(Association_Scan_Category)
                        .values(scan=scanid, category=catid)
                        .on_conflict_do_nothing())
    for port in host.get('ports', []):
        scripts = port.pop('scripts', [])
        # FIXME: handle screenshots
        for fld in ['screendata', 'screenshot', 'screenwords',
                    'service_method']:
            try:
                del port[fld]
            except KeyError:
                pass
        if 'service_servicefp' in port:
            port['service_fp'] = port.pop('service_servicefp')
        if 'state_state' in port:
            port['state'] = port.pop('state_state')
        if 'state_reason_ip' in port:
            port['state_reason_ip'] = self.convert_ip(
                port['state_reason_ip']
            )
        if merge:
            insrt = postgresql.insert(Port)
            portid = self.db.execute(
                insrt.values(scan=scanid, **port)
                .on_conflict_do_update(
                    index_elements=['scan', 'port', 'protocol'],
                    set_=dict(
                        scan=scanid,
                        **(port if newest else {})
                    ),
                )
                .returning(Port.id)
            ).fetchone()[0]
        else:
            portid = self.db.execute(
                insert(Port).values(scan=scanid, **port)
                .returning(Port.id)).fetchone()[0]
        for script in scripts:
            name, output = script.pop('id'), script.pop('output')
            if merge:
                if newest:
                    insrt = postgresql.insert(Script)
                    self.bulk.append(
                        insrt.values(
                            port=portid,
                            name=name,
                            output=output,
                            data=script,
                        )
                        .on_conflict_do_update(
                            index_elements=['port', 'name'],
                            set_={
                                "output": insrt.excluded.output,
                                "data": insrt.excluded.data,
                            },
                        ))
                else:
                    insrt = postgresql.insert(Script)
                    self.bulk.append(
                        insrt.values(
                            port=portid,
                            name=name,
                            output=output,
                            data=script,
                        )
                        .on_conflict_do_nothing())
            else:
                self.bulk.append(insert(Script).values(
                    port=portid,
                    name=name,
                    output=output,
                    data=script,
                ))
    if not merge:
        # FIXME: handle traceroutes on merge
        for trace in host.get('traces', []):
            traceid = self.db.execute(insert(Trace).values(
                scan=scanid,
                port=trace.get('port'),
                protocol=trace['protocol'],
            ).returning(Trace.id)).fetchone()[0]
            for hop in trace.get('hops'):
                hop['ipaddr'] = self.convert_ip(hop['ipaddr'])
                self.bulk.append(insert(Hop).values(
                    trace=traceid,
                    ipaddr=self.convert_ip(hop['ipaddr']),
                    ttl=hop["ttl"],
                    rtt=None if hop["rtt"] == '--' else hop["rtt"],
                    host=hop.get("host"),
                    domains=hop.get("domains"),
                ))
        # FIXME: handle hostnames on merge
        for hostname in host.get('hostnames', []):
            self.bulk.append(insert(Hostname).values(
                scan=scanid,
                domains=hostname.get('domains'),
                name=hostname.get('name'),
                type=hostname.get('type'),
            ))
    utils.LOGGER.debug("HOST STORED: %r", scanid)