def scorecard(self):
    """Build the weekly scorecard query for this tournament week.

    Stage 1 (``sc``): one row per qualifying game (``Game.gid in self.gids``)
    with each scoring component coerced to an Integer column. Each component
    is a predicate helper times its point weight (XL milestone 10, win 15,
    realtime/turncount 20, individual runes 10, Pan rune 20, gods 6).
    Predicate helpers presumably evaluate to 0/1 — TODO confirm against
    their definitions.

    Stage 2: join players/games to ``sc`` and aggregate per player,
    exposing every component plus a ``subtotal`` (xl + win) and the full
    ``total``, ordered best-total first with game start as tiebreak.

    :return: an un-sessioned ``Query``; caller binds it to a session.
    """
    sc = Query([
        Game.gid,
        Game.player_id,
        # Each label below is referenced by name in stage 2 via sc.c.<label>.
        type_coerce(self._XL(10) * 10, Integer).label("xl"),
        type_coerce(self._win() * 15, Integer).label("win"),
        type_coerce(self._realtime(6000) * 20, Integer).label("time"),
        type_coerce(self._turncount("Dis:1", 30000) * 20,
                    Integer).label("turns"),
        type_coerce(self._rune("Slime:5") * 10, Integer).label("slimy"),
        type_coerce(self._rune("Vaults:3") * 10, Integer).label("silver"),
        type_coerce(self._rune("Dis:2") * 10, Integer).label("iron"),
        type_coerce(self._rune("Tar:2") * 10, Integer).label("bone"),
        type_coerce(self._rune("Geh:2") * 10, Integer).label("obsidian"),
        type_coerce(self._rune("Coc:2") * 10, Integer).label("icy"),
        type_coerce(self._rune("Pan") * 20, Integer).label("pan"),
        type_coerce(self._god("Qazlal") * 6, Integer).label("qaz"),
        type_coerce(self._god("Jiyva") * 6, Integer).label("jiyva"),
        type_coerce(self._god("Lugonu") * 6, Integer).label("lucy"),
        type_coerce(self._god("Cheibriados") * 6, Integer).label("chei"),
    ]).filter(Game.gid.in_(self.gids)).subquery()
    # NOTE(review): the outerjoin condition ties Game to sc, pulling sc into
    # the FROM list implicitly — confirm the emitted SQL joins as intended.
    return Query([Player, Game]).select_from(Player).outerjoin(
        Game, Game.gid == sc.c.gid).add_columns(
            sc.c.xl, sc.c.win, sc.c.time, sc.c.turns, sc.c.slimy,
            sc.c.silver, sc.c.iron, sc.c.bone, sc.c.obsidian, sc.c.icy,
            sc.c.pan, sc.c.qaz, sc.c.chei, sc.c.lucy, sc.c.jiyva,
            func.max(sc.c.xl + sc.c.win).label("subtotal"),
            func.max(sc.c.xl + sc.c.win + sc.c.time + sc.c.turns +
                     sc.c.slimy + sc.c.silver + sc.c.iron + sc.c.bone +
                     sc.c.obsidian + sc.c.icy + sc.c.pan + sc.c.qaz +
                     sc.c.chei + sc.c.lucy +
                     sc.c.jiyva).label("total")).group_by(
                         sc.c.player_id).order_by(desc("total"), Game.start)
def get_cboe_sym_ts_from_db(query_start_date, query_end_date, symbol_list=None, px_types=None):
    """Get timeseries data from mysql as a (Id, Symbol) MultiIndexed DataFrame.

    :param query_start_date: inclusive start date (compared as str against Id)
    :param query_end_date: inclusive end date (compared as str against Id)
    :param symbol_list: symbols to fetch; defaults to ['SPX']
    :param px_types: price columns — Open, High, Low, Close, LastSale;
        ['ALL'] (the default) selects every price column
    :return: DataFrame indexed by ('Id', 'Symbol')
    """
    # Mutable defaults are shared across calls; bind fresh lists per call.
    if symbol_list is None:
        symbol_list = ['SPX']
    if px_types is None:
        px_types = ['ALL']
    # The two branches only differ in the column-name list, so compute that
    # first and build the query once (the original duplicated this code).
    if px_types[0] == 'ALL':
        list_poc_db_col_nm = ['Id', 'Symbol', 'Open', 'High', 'Low',
                              'Close', 'LastSale', 'LastTime']
    else:
        list_poc_db_col_nm = ['Id', 'Symbol'] + px_types + ['LastSale', 'LastTime']
    the_columns = [getattr(VixTermStructure, poc_db_col_nm)
                   for poc_db_col_nm in list_poc_db_col_nm]
    session = get_db_session()
    q = Query(the_columns, session=session)
    from_db = q.filter(VixTermStructure.Id >= str(query_start_date),
                       VixTermStructure.Id <= str(query_end_date),
                       VixTermStructure.Symbol.in_(symbol_list)).all()
    # from_records is a classmethod — call it on the class, not on a
    # throwaway instance as the original did.
    df = pd.DataFrame.from_records(from_db)
    # MultiIndex: first level is the date (Id), second level is the Symbol.
    df.columns = list_poc_db_col_nm
    df.set_index(['Id', 'Symbol'], inplace=True)
    return df
def generate_query(model, path, filters=None): """ Generate a SELECT query to fetch `model` ids along the path with given `filters` on the last model in `path`. :param model: A :ref:`declarative <sqla:declarative_toplevel>` class. :param path: :param [sqlalchemy.sql.expression.BinaryExpression] filters: :rtype: A :ref:`sqlalchemy.orm.query.Query` object """ # We start with the query selecting the ids of the models we want to return. query = Query(model.id) if path: # In case path is not blank, we need to alias the model id while joining # to prevent referencing the same table again. query = (Query(aliased(model).id)) # The below is a fix in case the same table is joined # multiple times. In that case, we alias everything except # the last path and then filter on the last path. path_list = path.split(".") last_path = path_list[-1] path_list = path_list[:-1] if path_list: query = query.join(*path_list, aliased=True) # The last path is purposfully left out from being aliased to make it easier # to contrunct filter conditions. query = query.join(last_path, from_joinpoint=True) if filters is not None: if isinstance(filters, list): query = query.filter(*filters) else: query = query.filter(filters) return query
def scorecard(self):
    """Build the CSDC scorecard query for this week.

    Stage 1 (``sc``): one row per qualifying game with each scoring
    component (unique kill, branch enter/end, god, one rune, three runes,
    win, tier-1/tier-2 bonuses) as a labelled column; the predicate
    helpers presumably yield 0/1-style expressions — TODO confirm.

    Stage 2: every contestant (via ``select_from(CsdcContestant)``) is
    outer-joined to their games/scores so players with no game still get a
    row; aggregated per contestant with a summed ``total``, ordered
    best-total first, game start as tiebreak.

    :return: an un-sessioned ``Query``; caller binds it to a session.
    """
    sc = Query([
        Game.gid,
        Game.player_id,
        type_coerce(self._uniq(), Integer).label("uniq"),
        type_coerce(self._brenter(), Integer).label("brenter"),
        type_coerce(self._brend(), Integer).label("brend"),
        type_coerce(self._god(), Integer).label("god"),
        type_coerce(self._rune(1), Integer).label("rune"),
        type_coerce(self._rune(3), Integer).label("threerune"),
        self._win().label("win"),
        self._bonus(self.tier1).label("bonusone"),
        self._bonus(self.tier2).label("bonustwo"),
    ]).filter(Game.gid.in_(self.gids)).subquery()
    return Query(
        [Player, Game]).select_from(CsdcContestant).join(Player).outerjoin(
            sc, CsdcContestant.player_id == sc.c.player_id).outerjoin(
                Game, Game.gid == sc.c.gid).add_columns(
                    sc.c.uniq, sc.c.brenter, sc.c.brend, sc.c.god,
                    sc.c.rune, sc.c.threerune, sc.c.win, sc.c.bonusone,
                    sc.c.bonustwo,
                    func.max(sc.c.uniq + sc.c.brenter + sc.c.brend +
                             sc.c.god + sc.c.rune + sc.c.threerune +
                             sc.c.win + sc.c.bonusone +
                             sc.c.bonustwo).label("total")).group_by(
                                 CsdcContestant.player_id).order_by(
                                     desc("total"), Game.start)
def run():
    """Fetch 30 days of HK-connect holding ratios for the pool stocks,
    keep only regular trading days, and persist to ``tail_northup``."""
    db = DBManager()
    jq = JQData()
    today = datetime.now().strftime("%Y%m%d")
    # Query window: the 30 calendar days leading up to today.
    date_id = datetime.strftime(parse(today) + relativedelta(days=-30),
                                '%Y%m%d')
    pool = pd.read_csv(os.path.join(PROJECT_DIR, "config/pool.txt"),
                       dtype=str, sep=' ',
                       names=['tag', 'name', 'code', 'price'])
    codes = [jq.normalize_code(c) for c in pool['code']]
    hold_info = finance.STK_HK_HOLD_INFO
    holdings = finance.run_query(Query(hold_info).filter(
        or_(hold_info.link_id == i for i in [310001, 310002]),
        or_(hold_info.code == i for i in codes),
        hold_info.day >= date_id
    ))[['day', 'name', 'share_ratio']]
    holdings['day'] = holdings['day'].map(
        lambda d: datetime.strftime(d, "%Y%m%d"))
    calendar = finance.STK_EXCHANGE_LINK_CALENDAR
    trade_day = finance.run_query(Query(calendar).filter(
        calendar.day >= date_id,
        or_(calendar.link_id == i for i in [310001, 310002]))
    )[['day', 'type']].drop_duplicates()
    trade_day['day'] = trade_day['day'].map(
        lambda d: datetime.strftime(d, "%Y%m%d"))
    # Left-join the calendar, then keep only regular trading days.
    merged = holdings.merge(trade_day, on='day', how='left')
    merged = merged[merged['type'] == '正常交易日']
    del merged['type']
    db.write(merged, 'tail_northup', mode='w')
def test_underscore_update_many_query(self, mock_clean):
    """_update_many cleans the query, bulk-updates it, and returns the count."""
    from sqlalchemy.orm.query import Query
    dirty = Query('asd')
    cleaned = Query("ASD")
    cleaned.all = Mock(return_value=[1, 2, 3])
    cleaned.update = Mock()
    mock_clean.return_value = cleaned
    result = docs.BaseMixin._update_many(dirty, {'foo': 'bar'})
    mock_clean.assert_called_once_with(dirty)
    cleaned.update.assert_called_once_with(
        {'foo': 'bar'}, synchronize_session='fetch')
    # The returned count is whatever the bulk update reported.
    assert result == cleaned.update()
def test_underscore_delete_many_query(self, mock_clean, mock_on_bulk):
    """_delete_many cleans the query, bulk-deletes, fires the bulk hook,
    and returns the delete count."""
    from sqlalchemy.orm.query import Query
    dirty = Query('asd')
    cleaned = Query("ASD")
    cleaned.all = Mock(return_value=[1, 2, 3])
    cleaned.delete = Mock()
    mock_clean.return_value = cleaned
    result = docs.BaseMixin._delete_many(dirty)
    mock_clean.assert_called_once_with(dirty)
    cleaned.delete.assert_called_once_with(synchronize_session=False)
    # The bulk hook receives the items that were about to be deleted.
    mock_on_bulk.assert_called_once_with(docs.BaseMixin, [1, 2, 3], None)
    assert result == cleaned.delete()
def get_query(self, session: Session, *args) -> Query:
    """
    Return a Query bound to *session*.

    :param session: an active Session; must not be None
    :param args: optional explicit entities/columns to select; when empty,
        this object's model is queried
    :return: Query object
    :raises ValueError: if session is None
    """
    # https://docs.sqlalchemy.org/en/latest/orm/query.html#the-query-object
    if session is None:
        raise ValueError('invalid session object')
    entities = args if args else (self.model,)
    return Query(*entities, session=session)
def query_entities(*entities):
    """Build a Query over Fundamentals columns for the requested entities.

    Always selects the four base identity columns; each DeclarativeMeta
    entity contributes all its public, non-base, non-deprecated columns,
    and each InstrumentedAttribute is appended directly (base columns are
    skipped to avoid duplicates). Anything else raises ValueError.
    """
    base_list = ["stockcode", "tradedate", "end_date", "announce_date",
                 "rpt_year", "rpt_quarter"]
    base = Fundamentals.fundamental_base
    columns = [base.stockcode, base.tradedate, base.rpt_year, base.rpt_quarter]
    for entity in entities:
        if isinstance(entity, DeclarativeMeta):
            dropped = deprecated_fundamental_data("data_point")
            columns.extend(
                attr for name, attr in entity.__dict__.items()
                if not name.startswith("_")
                and name not in base_list
                and name not in dropped)
        elif isinstance(entity, InstrumentedAttribute):
            attr_name = str(entity).split(".")[-1]
            if attr_name not in ("stockcode", "tradedate",
                                 "rpt_year", "rpt_quarter"):
                columns.append(entity)
        else:
            raise ValueError(
                "Invalid metrics to query, it maybe not specify metrics, "
                "please check the metrics in query."
            )
    return Query(columns)
def __init__(self, **kwargs):
    """Initialize a week from its config dict and precompute qualifying gids.

    Required kwargs: ``number``, ``species``, ``background``, ``start``,
    ``end``. Builds ``self.gids``: the gids of each player's qualifying
    game(s), keeping at most their first attempt (see anti-join below).
    """
    with get_session() as s:
        self.number = kwargs["number"]
        self.species = get_species(s, kwargs["species"])
        self.background = get_background(s, kwargs["background"])
        self.start = kwargs["start"]
        self.end = kwargs["end"]
        # todo: clean up the retry removal
        g1 = aliased(Game)
        g2 = aliased(Game)
        # CTE: for each player, their (up to) two earliest valid games,
        # annotated with the XL reached per the latest milestone.
        possiblegames = self._valid_games(g1).add_columns(
            g1.player_id, g1.start, g1.end).filter(
                g1.gid.in_(
                    self._valid_games(g2).filter(
                        g2.player_id == g1.player_id).order_by(
                            g2.start).limit(2))).join(
                latestmilestones, g1.gid == latestmilestones.c.gid
            ).add_column(latestmilestones.c.xl).cte()
        pg2 = possiblegames.alias()
        # Anti-join: keep a game only if the same player has no earlier
        # game in the CTE (pg2.c.gid IS NULL), i.e. the first attempt.
        self.gids = Query(possiblegames.c.gid).outerjoin(
            pg2, and_(
                pg2.c.player_id == possiblegames.c.player_id,
                possiblegames.c.start > pg2.c.start)).filter(
                    pg2.c.gid == None)
def get_purchase_data(self, purchase_id):
    """Return (rounded_price, currency_code, note, purchase_id) for the
    purchase, or four Nones when no such purchase exists."""
    record = Query(Purchase).with_session(
        session=self.session).filter_by(id=purchase_id).first()
    if not record:
        return None, None, None, None
    return record.rounded_price, record.currency_code, record.note, purchase_id
def run():
    """Fetch 中信期货 IC member position ranks from JQData, pivot totals/
    buys/sells (and their increases) per day, and persist to tail_zxqh_v2."""
    jq = JQData()
    db = DBManager()
    start_date = datetime.now().strftime("%Y%m%d")
    frames = []
    # NOTE(review): range(1) fetches only today; presumably widened for
    # backfills — confirm before changing.
    for i in range(1):
        date_id = datetime.strftime(
            parse(start_date) + relativedelta(days=i), '%Y%m%d')
        df = finance.run_query(
            Query(finance.FUT_MEMBER_POSITION_RANK).filter(
                finance.FUT_MEMBER_POSITION_RANK.exchange == 'CCFX',
                finance.FUT_MEMBER_POSITION_RANK.underlying_code == 'IC',
                finance.FUT_MEMBER_POSITION_RANK.day == date_id,
                finance.FUT_MEMBER_POSITION_RANK.member_name == '中信期货'))
        frames.append(df)
    # DataFrame.append was removed in pandas 2.0; concat is the supported API.
    total_df = pd.concat([pd.DataFrame()] + frames, ignore_index=True)
    # Sum indicator columns per (day, rank_type), then pivot rank types into
    # six flat columns: total/buy/sell and their day-over-day increases.
    trans_df = \
        total_df[['day', 'code', 'indicator', 'indicator_increase',
                  'rank_type_ID']].groupby(['day', 'rank_type_ID'])[
            ['indicator', 'indicator_increase']].sum().unstack('rank_type_ID')
    trans_df.columns = [
        'total', 'buy', 'sell', 'total_inc', 'buy_inc', 'sell_inc'
    ]
    if len(trans_df) > 0:
        trans_df.reset_index(inplace=True)
        db.write(trans_df, "tail_zxqh_v2")
def get_market_cap(security: Union[str, List[str], pd.DataFrame], date=None, circulation=True):
    """
    Get market-capitalization (or circulating market-cap) data.

    :param security: a code str, a list of codes, or a DataFrame whose
        index holds the codes
    :param date: trade date to query; defaults to the current trade day
    :param circulation: if True return circulating market cap, else total
    :return: a scalar for str input, a code->value dict for list input,
        or the input DataFrame with the market-cap column appended
    """
    if not date:
        date = BasicBag.get_current_tradeday()
    # Normalize the three accepted input shapes into a list of codes.
    if isinstance(security, str):
        security_list = [security]
    elif isinstance(security, pd.DataFrame):
        security_list = security.index.tolist()
    elif isinstance(security, list):
        security_list = security
    else:
        raise TypeError("security parameter type is error")
    q = Query(jq.valuation).filter(jq.valuation.code.in_(security_list))
    df = jq.get_fundamentals(q, date)
    # Pick the circulating vs. total market-cap column.
    key = 'circulating_market_cap' if circulation else 'market_cap'
    if isinstance(security, pd.DataFrame):
        # Append the column positionally; assumes get_fundamentals returns
        # rows in the same order as security_list — TODO confirm.
        return pd.concat([security.reset_index(), df[key]],
                         axis=1).set_index("index")
    elif isinstance(security, str):
        return df[key][0].item()
    else:
        return dict(zip(security, df[key].values.tolist()))
def test_get_filter_args(self):
    """_get_filter_args turns a kwargs dict into SQL comparison clauses."""
    mixin = SQLAlchemyMixin()
    base_query = Query(self.TableForSQLAlchemyMixin)
    clauses = list(mixin._get_filter_args(base_query, {'id': 1}))
    self.assertEqual(len(clauses), 1)
    expected = self.TableForSQLAlchemyMixin.id == 1
    # Compare rendered SQL text since clause objects don't compare by value.
    self.assertEqual(str(clauses[0]), str(expected))
def overview():
    """Tournament standings query: one-time bonuses plus weekly totals.

    Joins each contestant to the one-time scorecard (fifteenrune, sub40k,
    zig, lowxlzot, nolairwin, asceticrune) and every week's sorted
    scorecard, then exposes a summed ``grandtotal``, a bonus-based
    ``tiebreak``, the high score, and a ``played`` flag, ordered by all
    four descending.
    """
    q = Query(CsdcContestant)
    sc = onetimescorecard().subquery()
    q = q.outerjoin(sc, CsdcContestant.player_id == sc.c.player_id)
    totalcols = []   # every column that contributes to grandtotal
    wktotal = []     # raw (nullable) weekly totals, for the played flag
    wkbonuses = []   # weekly bonus columns, for the tiebreak
    for col in ("fifteenrune", "sub40k", "zig", "lowxlzot", "nolairwin",
                "asceticrune"):
        # ifnull(..., 0) so a missing bonus doesn't NULL the whole sum.
        totalcols.append(func.ifnull(getattr(sc.c, col), 0))
        q = q.add_column(getattr(sc.c, col).label(col))
    for wk in weeks:
        a = wk.sortedscorecard().subquery()
        totalcols.append(func.ifnull(a.c.total, 0))
        wktotal.append(a.c.total)
        wkbonuses.append(func.ifnull(a.c.bonusone, 0) +
                         func.ifnull(a.c.bonustwo, 0))
        q = q.outerjoin(a, CsdcContestant.player_id == a.c.player_id
                        ).add_column(
            a.c.total.label("wk" + wk.number))
    return q.add_columns(
        sc.c.account_id.label("account_id"),
        # builtin sum() folds the column expressions with SQL '+'.
        sum(totalcols).label("grandtotal"),
        sum(wkbonuses).label("tiebreak"),
        sc.c.hiscore.label("hiscore"),
        # played = at least one weekly total is non-NULL.
        (func.coalesce(*wktotal) != None).label("played")
    ).order_by(desc("grandtotal"), desc("tiebreak"), desc("hiscore"),
               desc("played"))
def delete_current_purchase(self):
    """Delete the purchase being edited, update counters, close the
    conversation, and commit."""
    current_id = self.purchase.id
    purchases = Query(Purchase).with_session(session=self.session)
    purchases.filter_by(id=current_id).delete()
    self.decrement_open_purchases_of_conversation()
    self.close_conversation()
    self.session.commit()
def __init__(self, **kwargs):
    """Initialize a week from its config dict and precompute qualifying gids.

    Required kwargs: ``number``, ``species``, ``background``, ``gods``,
    ``start``, ``end``; optional ``bonus1``/``bonus2`` (default NoBonus).
    Builds ``self.gids``: the gids of each player's qualifying game(s) —
    the first attempt, plus a second attempt when the first ended before
    reaching XL 5 (the retry allowance).
    """
    with get_session() as s:
        self.number = kwargs["number"]
        self.species = get_species(s, kwargs["species"])
        self.background = get_background(s, kwargs["background"])
        self.gods = [get_god(s, g) for g in kwargs["gods"]]
        self.start = kwargs["start"]
        self.end = kwargs["end"]
        self.tier1 = kwargs.get("bonus1", NoBonus)
        self.tier2 = kwargs.get("bonus2", NoBonus)
        g1 = aliased(Game)
        g2 = aliased(Game)
        # CTE: for each player, their (up to) two earliest valid games,
        # annotated with the XL reached per the latest milestone.
        possiblegames = self._valid_games(g1).add_columns(
            g1.player_id, g1.start, g1.end).filter(
                g1.gid.in_(
                    self._valid_games(g2).filter(
                        g2.player_id == g1.player_id).order_by(
                            g2.start).limit(2))).join(
                latestmilestones, g1.gid == latestmilestones.c.gid
            ).add_column(latestmilestones.c.xl).cte()
        pg2 = possiblegames.alias()
        # Anti-join: keep a game if no earlier game exists (first attempt),
        # or the earlier game finished below XL 5 (retry allowed).
        self.gids = Query(possiblegames.c.gid).outerjoin(
            pg2, and_(pg2.c.player_id == possiblegames.c.player_id,
                      possiblegames.c.start > pg2.c.start)).filter(
                          or_(pg2.c.gid == None,
                              and_(pg2.c.end != None, pg2.c.xl < 5)))
def overview():
    """Per-player overview: for each week, the subtotal column (labelled
    "wkN") plus every individual score component (labelled "wkN<name>")."""
    q = Query(Player)
    # Component columns, in the exact order the original listed them.
    components = ("time", "turns", "slimy", "silver", "iron", "bone",
                  "obsidian", "icy", "pan", "qaz", "chei", "lucy", "jiyva")
    for wk in weeks:
        wk_n = "wk" + wk.number
        a = wk.scorecard().subquery()
        q = q.outerjoin(a, Player.id == a.c.player_id).add_column(
            a.c.subtotal.label(wk_n))
        for name in components:
            q = q.add_column(getattr(a.c, name).label(wk_n + name))
    return q
def test_normalize_query_by_date():
    """normalize_query_by_date should UNION a generate_series of dates onto
    the grouped counts and coalesce missing counts, ordered by date."""
    date_label = 'date_label'
    report_label = 'count_label'
    query = Query([
        Attendee.birthdate.label(date_label),
        func.count(Attendee.id).label(report_label)
    ]).group_by(date_label)
    query = normalize_query_by_date(
        query, date_label, report_label,
        start_date=UTC20DAYSAGO, end_date=UTCNOW)
    # Expected SQL, byte-exact: the trailing backslashes are line
    # continuations inside the literal, so only the explicit \n survive.
    expected = """\
SELECT date_label, coalesce(count_label, :param_1) AS count_label \n\
FROM (\
SELECT attendee.birthdate AS date_label, count(attendee.id) AS count_label \n\
FROM attendee GROUP BY date_label \
UNION \
SELECT generate_series(\
:generate_series_1, \
:generate_series_2, \
:generate_series_3) AS date_label, :param_2 AS count_label) AS anon_1 \
ORDER BY anon_1.date_label"""
    assert expected == str(query)
def generate_query(model, path, filters=None):
    """
    Generate a SELECT query to fetch `model` ids along the path with given
    `filters` on the last model in `path`.

    :param model: A :ref:`declarative <sqla:declarative_toplevel>` class.
    :param path: Dot-separated relationship path to join along; may be blank.
    :param [sqlalchemy.sql.expression.BinaryExpression] filters: a single
        filter clause or a list of clauses.
    :rtype: A :ref:`sqlalchemy.orm.query.Query` object
    """
    query = Query(model.id)
    if path:
        # Alias every intermediate hop so repeated tables don't collide;
        # the final hop stays un-aliased so filters can reference it.
        *intermediate, last_path = path.split(".")
        if intermediate:
            query = query.join(*intermediate, aliased=True)
        query = query.join(last_path, from_joinpoint=True)
    if filters is None:
        return query
    if isinstance(filters, list):
        return query.filter(*filters)
    return query.filter(filters)
def element_atomic_weight(self, zeq, reference=None):
    """Return the atomic weight of the element *zeq*, optionally restricted
    to a particular *reference*; raises ValueError when none is known."""
    z = self._get_z(zeq)
    query = (Query(ElementAtomicWeightProperty.value)
             .join(Element)
             .filter(Element.z == z))
    not_found = ValueError('Unknown atomic weight for z="{0}" and '
                           'reference="{1}"'.format(z, reference))
    return self._query_with_references(query, not_found, reference)
def element_mass_density_kg_per_m3(self, zeq, reference=None):
    """Return the mass density (kg/m^3) of the element *zeq*, optionally
    restricted to a particular *reference*; raises ValueError when unknown."""
    z = self._get_z(zeq)
    query = (Query(ElementMassDensityProperty.value_kg_per_m3)
             .join(Element)
             .filter(Element.z == z))
    not_found = ValueError('Unknown mass density for z="{0}" and '
                           'reference="{1}"'.format(z, reference))
    return self._query_with_references(query, not_found, reference)
def __init__(cls, classname, bases, dict_):
    """Metaclass hook: attach a ready-made ``query`` to every mapped class."""
    super().__init__(classname, bases, dict_)
    if not getattr(cls, "__tablename__", None):
        # Not a mapped table (e.g. the declarative base itself).
        cls.query = None
        return
    # DataBase is a singleton, so constructing one here is safe and cheap.
    database = DataBase()
    cls.query = Query(cls, session=database.session)
def test_query_column_name(self):
    """Geometry columns must render as AsBinary(...) in SELECT clauses but
    stay bare inside WHERE clauses and when accessed via .RAW."""
    # test for bug: http://groups.google.com/group/geoalchemy/browse_thread/thread/6b731dd1673784f9
    from sqlalchemy.orm.query import Query
    query = Query(Road.road_geom).filter(Road.road_geom == '..').__str__()
    ok_('AsBinary(roads.road_geom)' in query,
        'table name is part of the column expression (select clause)')
    ok_('WHERE Equals(roads.road_geom' in query,
        'table name is part of the column expression (where clause)')
    query_wkb = Select([Road.road_geom
                        ]).where(Road.road_geom == 'POINT(0 0)').__str__()
    ok_('SELECT AsBinary(roads.road_geom)' in query_wkb, 'AsBinary is added')
    ok_('WHERE Equals(roads.road_geom' in query_wkb,
        'AsBinary is not added in where clause')
    # test for RAW attribute: bypasses the AsBinary wrapping entirely.
    query_wkb = Select([Road.road_geom.RAW]).__str__()
    ok_('SELECT roads.road_geom' in query_wkb, 'AsBinary is not added')
    ok_(session.query(Road.road_geom.RAW).first())
    query_srid = Query(func.SRID(Road.road_geom.RAW))
    ok_('SRID(roads.road_geom)' in query_srid.__str__(),
        'AsBinary is not added')
    ok_(session.scalar(query_srid))
    # SRID over the wrapped column is lost; over RAW it round-trips.
    eq_(
        session.scalar(
            Select([func.SRID(Spot.spot_location)
                    ]).where(Spot.spot_id == 1)), None,
        'AsBinary is added and the SRID is not returned')
    eq_(
        str(
            session.scalar(
                Select([func.SRID(Spot.spot_location.RAW)
                        ]).where(Spot.spot_id == 1))), '4326',
        'AsBinary is not added and the SRID is returned')
    # Aliased tables must carry the alias through select and from clauses.
    spot_alias = aliased(Spot)
    query_wkt = Select([func.wkt(spot_alias.spot_location.RAW)]).__str__()
    ok_('SELECT wkt(spots_1.spot_location' in query_wkt,
        'Table alias is used in select clause')
    ok_('FROM spots AS spots_1' in query_wkt,
        'Table alias is used in from clause')
def _brend(self):
    """EXISTS clause: the game reached the end of a multilevel branch."""
    with get_session() as s:
        brend_verb_id = get_verb(s, "br.end").id
        # Sub-select of every place belonging to a multilevel branch.
        deep_places = Query(Place.id).join(Branch).filter(Branch.multilevel)
        return self._valid_milestone().filter(
            Milestone.place_id.in_(deep_places),
            Milestone.verb_id == brend_verb_id).exists()
def load_initial_data(self, message_id, position):
    """Resolve the purchase addressed by (message_id, position), its
    conversation, and the conversation's open-purchase count."""
    purchases = Query(Purchase).with_session(session=self.session)
    self.purchase = purchases.join(Conversation.purchases).filter_by(
        user_message_id=message_id,
        position=position,
    ).one()
    self.conversation = Query(Conversation).with_session(
        session=self.session).filter_by(
            id=self.purchase.conversation_id).one()
    # Query objects are generative, so `purchases` is still the bare base.
    self.conversation_open_purchases_count = purchases.filter_by(
        status=PurchaseStatus.open,
        conversation_id=self.conversation.id,
    ).count()
def element_name(self, zeq, language='en', reference=None):
    """Return the name of element *zeq* in *language* (default English),
    optionally restricted to a *reference*; raises ValueError when unknown."""
    z = self._get_z(zeq)
    query = (Query(ElementNameProperty.name)
             .filter(ElementNameProperty.language_code == language)
             .join(Element)
             .filter(Element.z == z))
    not_found = ValueError('Unknown name for z="{0}", '
                           'language="{1}" and '
                           'reference="{2}"'
                           .format(z, language, reference))
    return self._query_with_references(query, not_found, reference)
def overview():
    """Standings: one total column per week plus a grand total, best first."""
    query = Query(CsdcContestant)
    weekly_totals = []
    for week in weeks:
        wk = week.scorecard().subquery()
        # ifnull so a missing week counts as zero instead of NULLing the sum.
        weekly_totals.append(func.ifnull(wk.c.total, 0))
        query = query.outerjoin(
            wk, CsdcContestant.player_id == wk.c.player_id
        ).add_column(wk.c.total.label("wk" + week.number))
    grandtotal = sum(weekly_totals).label("grandtotal")
    return query.add_column(grandtotal).order_by(desc("grandtotal"))
def query(self, *entities, **options):
    """Create a query; a single Entity (or its mapper) goes through the
    query factory, anything else falls back to a plain Query."""
    if len(entities) != 1:
        return Query(entities, self, **options)
    sole = entities[0]
    if isinstance(sole, type) and issubclass(sole, Entity):
        target = sole
    else:
        # Anything else is assumed to be a SQLAlchemy mapper for the entity.
        target = sole.entity
    return self.__query_factory(target, options)
def filter_valid_annotations(query):
    """Restrict *query* to the newest annotation per annotated row.

    For every model in ``models``, select max(annotation_id) grouped by the
    annotated row's foreign-key column — whose name is derived from the
    table name by replacing "_annotation" with "_id" (e.g.
    release_annotation -> release_id). The UNION ALL of those per-model
    maxima is the set of currently-valid annotation ids; everything else
    is a superseded older revision and is filtered out.
    """
    queries = [
        Query(func.max(getattr(m, "annotation_id"))).group_by(
            getattr(
                m,
                # Automatically turn (for example) ReleaseAnnotation
                # into release_id
                m.__tablename__.replace("_annotation", "_id")))
        for m in models
    ]
    filter_query = queries[0].union_all(*queries[1:])
    return query.filter(Annotation.id.in_(filter_query))