from flask import current_app
from sqlalchemy import and_, func

# `db` is the application's SQLAlchemy instance; `Bet`, `Outcome` and `Bid`
# are its models (an Outcome belongs to a Bet and has many Bids).


def _bet_list(bet_filter):
    """Return the bets matching `bet_filter` (a callable that takes the Bet
    model and returns a SQLAlchemy criterion), plus, for each outcome of
    those bets, the highest "yes" and "no" bid prices."""
    bets = db.session.query(Bet).filter(bet_filter(Bet)).all()
    # Highest "yes" bid per outcome of the matching bets.
    yes_bets = db.session.query(Outcome, func.max(Bid.price))\
        .join(Outcome.bet).join(Outcome.bids)\
        .filter(and_(bet_filter(Bet), Bid.yes_bid == True))\
        .group_by(Outcome).all()
    # Highest "no" bid per outcome of the matching bets.
    no_bets = db.session.query(Outcome, func.max(Bid.price))\
        .join(Outcome.bet).join(Outcome.bids)\
        .filter(and_(bet_filter(Bet), Bid.yes_bid == False))\
        .group_by(Outcome).all()
    current_app.logger.info(yes_bets)
    current_app.logger.info(no_bets)
    return bets, dict(yes_bets), dict(no_bets)
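# A minimal usage sketch: `app`, the `expiry_date` column and the template
# name below are assumptions for illustration, not part of the original
# code. It shows why `_bet_list` takes a callable: the same aggregation can
# be reused with any per-Bet criterion.
from datetime import datetime

from flask import render_template


@app.route('/bets/open')
def open_bets():
    bets, best_yes, best_no = _bet_list(
        lambda bet: bet.expiry_date > datetime.utcnow())
    return render_template('bets.html', bets=bets,
                           best_yes=best_yes, best_no=best_no)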
# Module-level imports assumed by this method:
#   import time
#   from sqlalchemy import column, delete, func, select
#   from sqlalchemy.dialects import postgresql
#   from ivre import config, utils

def insert_or_update_bulk(self, specs, getinfos=None,
                          separated_timestamps=True):
    """Like `.insert_or_update()`, but the `specs` parameter has to be an
    iterable of `(timestamp, spec)` (if `separated_timestamps` is True) or
    `spec` (if it is False) values. This will perform PostgreSQL COPY FROM
    inserts, with the major drawback that the `getinfos` parameter will be
    called (if it is not `None`) for each spec, even when the spec already
    exists in the database and the call was hence unnecessary. It's up to
    you to decide whether bulk inserts are worth it or whether you want to
    go with the regular `.insert_or_update()` method.

    """
    more_to_read = True
    # Unindexed temporary table modeled on the passive table, used as a
    # staging area for each batch.
    tmp = self.create_tmp_table(self.tables.passive)
    if config.DEBUG_DB:
        total_upserted = 0
        total_start_time = time.time()
    while more_to_read:
        if config.DEBUG_DB:
            start_time = time.time()
        # COPY the next batch (at most POSTGRES_BATCH_SIZE specs) into the
        # temporary table.
        with PassiveCSVFile(specs, self.convert_ip, tmp, getinfos=getinfos,
                            separated_timestamps=separated_timestamps,
                            limit=config.POSTGRES_BATCH_SIZE) as fdesc:
            self.copy_from(fdesc, tmp.name)
            more_to_read = fdesc.more_to_read
            if config.DEBUG_DB:
                count_upserted = fdesc.count
        # Merge the batch into the passive table: duplicate specs within
        # the batch are first aggregated with GROUP BY (sum of `count`,
        # min/max of `firstseen`/`lastseen`), then upserted with
        # ON CONFLICT DO UPDATE.
        insrt = postgresql.insert(self.tables.passive)
        self.db.execute(
            insrt.from_select(
                [column(col) for col in [
                    'addr',
                    # sum / min / max
                    'count', 'firstseen', 'lastseen',
                    # grouped
                    'sensor', 'port', 'recontype', 'source', 'targetval',
                    'value', 'fullvalue', 'info', 'moreinfo'
                ]],
                select([tmp.columns['addr'],
                        func.sum(tmp.columns['count']),
                        func.min(tmp.columns['firstseen']),
                        func.max(tmp.columns['lastseen'])] + [
                            tmp.columns[col] for col in [
                                'sensor', 'port', 'recontype', 'source',
                                'targetval', 'value', 'fullvalue', 'info',
                                'moreinfo']])\
                .group_by(*(tmp.columns[col] for col in [
                    'addr', 'sensor', 'port', 'recontype', 'source',
                    'targetval', 'value', 'fullvalue', 'info', 'moreinfo'
                ]))
            )\
            .on_conflict_do_update(
                index_elements=['addr', 'sensor', 'recontype', 'port',
                                'source', 'value', 'targetval', 'info'],
                set_={
                    'firstseen': func.least(
                        self.tables.passive.firstseen,
                        insrt.excluded.firstseen,
                    ),
                    'lastseen': func.greatest(
                        self.tables.passive.lastseen,
                        insrt.excluded.lastseen,
                    ),
                    'count':
                    self.tables.passive.count + insrt.excluded.count,
                },
            )
        )
        # Empty the temporary table for the next batch.
        self.db.execute(delete(tmp))
        if config.DEBUG_DB:
            stop_time = time.time()
            time_spent = stop_time - start_time
            total_upserted += count_upserted
            total_time_spent = stop_time - total_start_time
            utils.LOGGER.debug(
                "DB:PERFORMANCE STATS %s upserts, %f s, %s/s\n"
                "\ttotal: %s upserts, %f s, %s/s",
                utils.num2readable(count_upserted), time_spent,
                utils.num2readable(count_upserted / time_spent),
                utils.num2readable(total_upserted), total_time_spent,
                utils.num2readable(total_upserted / total_time_spent),
            )
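# A standalone sketch of the same COPY-then-merge pattern on a toy counter
# table. Everything here (DB_URL, the `seen`/`seen_staging` tables, the
# column names) is hypothetical; it assumes SQLAlchemy 1.x and psycopg2, to
# match the `select([...])` style used above. Rows are bulk-loaded with
# COPY FROM into an unindexed staging table, pre-aggregated with GROUP BY,
# then merged with ON CONFLICT DO UPDATE so duplicate keys bump counters
# instead of raising IntegrityError.
import io

from sqlalchemy import (Column, Integer, MetaData, Table, Text,
                        create_engine, delete, func, select)
from sqlalchemy.dialects import postgresql

DB_URL = 'postgresql://user:password@localhost/testdb'  # hypothetical
engine = create_engine(DB_URL)
metadata = MetaData()
seen = Table('seen', metadata,
             Column('addr', Text, primary_key=True),
             Column('count', Integer, nullable=False))
staging = Table('seen_staging', metadata,
                Column('addr', Text),
                Column('count', Integer),
                prefixes=['TEMPORARY'])


def upsert_batch(conn, rows):
    """COPY `rows` ((addr, count) tuples) into the staging table, then
    merge them into `seen`, summing the counters on conflict."""
    buf = io.StringIO(''.join('%s\t%d\n' % row for row in rows))
    # The underlying psycopg2 cursor provides copy_from().
    conn.connection.cursor().copy_from(buf, 'seen_staging',
                                       columns=('addr', 'count'))
    insrt = postgresql.insert(seen)
    conn.execute(
        insrt.from_select(
            ['addr', 'count'],
            select([staging.c.addr, func.sum(staging.c.count)])
            .group_by(staging.c.addr),
        ).on_conflict_do_update(
            index_elements=['addr'],
            set_={'count': seen.c.count + insrt.excluded.count},
        )
    )
    conn.execute(delete(staging))


if __name__ == '__main__':
    with engine.begin() as conn:
        metadata.create_all(bind=conn)
        upsert_batch(conn, [('10.0.0.1', 2), ('10.0.0.1', 3),
                            ('10.0.0.2', 1)])
        # 10.0.0.1 now has count 5; running the batch again bumps the
        # counters instead of violating the primary key.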